comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
It would be nice to standardise between 'namespace' and 'namespaceName', which is used interchangeably in a few files I've reviewed so far. I suspect everything should be referred to as 'namespace'.
public String getNamespace() { return namespaceName; }
}
public String getNamespace() { return namespace; }
class ErrorContext implements Serializable { static final String MESSAGE_PARAMETER_DELIMITER = ", "; private static final long serialVersionUID = -2819764407122954922L; private final String namespaceName; /** * Creates a new instance with the provided {@code namespaceName}. * * @param namespaceName The service namespace of the error. * @throws IllegalArgumentException when {@code namespaceName} is {@code null} or empty. */ public ErrorContext(String namespaceName) { if (ImplUtils.isNullOrEmpty(namespaceName)) { throw new IllegalArgumentException("'namespaceName' cannot be null or empty"); } this.namespaceName = namespaceName; } /** * Gets the namespace for this error. * * @return The namespace for this error. */ /** * Creates a string representation of this ErrorContext. * * @return A string representation of this ErrorContext. */ @Override public String toString() { return String.format(Locale.US, "NAMESPACE: %s", getNamespace()); } }
class ErrorContext implements Serializable { static final String MESSAGE_PARAMETER_DELIMITER = ", "; private static final long serialVersionUID = -2819764407122954922L; private final String namespace; /** * Creates a new instance with the provided {@code namespace}. * * @param namespace The service namespace of the error. * @throws IllegalArgumentException when {@code namespace} is {@code null} or empty. */ public ErrorContext(String namespace) { if (ImplUtils.isNullOrEmpty(namespace)) { throw new IllegalArgumentException("'namespace' cannot be null or empty"); } this.namespace = namespace; } /** * Gets the namespace for this error. * * @return The namespace for this error. */ /** * Creates a string representation of this ErrorContext. * * @return A string representation of this ErrorContext. */ @Override public String toString() { return String.format(Locale.US, "NAMESPACE: %s", getNamespace()); } }
I removed this line because after adding the default case, this line can never be reached. Let me know if there is concern here.
public Single<HttpResponse> sendAsync(HttpRequest request) { this.factory.tryNumber++; if (this.factory.tryNumber > this.factory.options.maxTries()) { throw new IllegalArgumentException("Try number has exceeded max tries"); } String expectedHost = RETRY_TEST_PRIMARY_HOST; if (this.factory.tryNumber % 2 == 0) { /* Special cases: retry until success scenario fail's on the 4th try with a 404 on the secondary, so we never expect it to check the secondary after that. All other tests should continue to check the secondary. Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max delay. */ if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS && this.factory.tryNumber > 4) || (this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING && this.factory.tryNumber > 2))) { expectedHost = RETRY_TEST_SECONDARY_HOST; } } if (!request.url().getHost().equals(expectedHost)) { throw new IllegalArgumentException("The host does not match the expected host"); } /* This policy will add test headers and query parameters. Ensure they are removed/reset for each retry. The retry policy should be starting with a fresh copy of the request for every try. */ if (request.headers().value(RETRY_TEST_HEADER) != null) { throw new IllegalArgumentException("Headers not reset."); } if ((request.url().getQuery() != null && request.url().getQuery().contains(RETRY_TEST_QUERY_PARAM))) { throw new IllegalArgumentException("Query params not reset."); } if (FlowableUtil.collectBytesInBuffer(request.body()).blockingGet() .compareTo(RETRY_TEST_DEFAULT_DATA) != 0) { throw new IllegalArgumentException(("Body not reset.")); } /* Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each try. 
*/ request.headers().set(RETRY_TEST_HEADER, "testheader"); UrlBuilder builder = UrlBuilder.parse(request.url()); builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery"); try { request.withUrl(builder.toURL()); } catch (MalformedURLException e) { throw new IllegalArgumentException("The URL has been mangled"); } switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS: switch (this.factory.tryNumber) { case 1: /* The timer is set with a timeout on the Single used to make the request. If the single doesn't return success fast enough, it will throw a TimeoutException. We can short circuit the waiting by simply returning an error. We will validate the time parameter later. Here, we just test that a timeout is retried. */ return Single.error(new TimeoutException()); case 2: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 3: return RETRY_TEST_TIMEOUT_ERROR_RESPONSE; case 4: /* By returning 404 when we should be testing against the secondary, we exercise the logic that should prevent further tries to secondary when the secondary evidently doesn't have the data. 
*/ return RETRY_TEST_NOT_FOUND_RESPONSE; case 5: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 6: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued trying after success."); } case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case RETRY_TEST_SCENARIO_NON_RETRYABLE: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_NON_RETRYABLE_ERROR; default: throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return RETRY_TEST_NON_RETRYABLE_ERROR; default: throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NETWORK_ERROR: switch (this.factory.tryNumber) { case 1: case 2: return Single.error(new IOException()); case 3: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued retrying after success."); } case RETRY_TEST_SCENARIO_TRY_TIMEOUT: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() + 1, TimeUnit.SECONDS); case 2: return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() + 1, TimeUnit.SECONDS); case 3: return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() - 1, TimeUnit.SECONDS); default: throw new IllegalArgumentException("Continued retrying after success"); } case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: /* Calculation for secondary is always the same, so we don't need to keep testing it. Not trying the secondary any more will also speed up the test. 
*/ return testDelayBounds(1, false, RETRY_TEST_NOT_FOUND_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: return testDelayBounds(3, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 5: /* With the current configuration in RetryTest, the maxRetryDelay should be reached upon the fourth try to the primary. */ return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 6: return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE); default: throw new IllegalArgumentException("Max retries exceeded/continued retrying after success"); } case RETRY_TEST_SCENARIO_FIXED_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return testDelayBounds(1, false, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: /* Fixed backoff means it's always the same and we never hit the max, no need to keep testing. */ return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Retries continued after success."); } case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return Single.error(new UnexpectedLengthException("Unexpected length", 5, 6)); default: throw new IllegalArgumentException("Retries continued on non retryable error."); } default: throw new IllegalArgumentException("Invalid retry test scenario."); } }
throw new IllegalArgumentException("Invalid retry test scenario.");
public Single<HttpResponse> sendAsync(HttpRequest request) { this.factory.tryNumber++; if (this.factory.tryNumber > this.factory.options.maxTries()) { throw new IllegalArgumentException("Try number has exceeded max tries"); } String expectedHost = RETRY_TEST_PRIMARY_HOST; if (this.factory.tryNumber % 2 == 0) { /* Special cases: retry until success scenario fail's on the 4th try with a 404 on the secondary, so we never expect it to check the secondary after that. All other tests should continue to check the secondary. Exponential timing only tests secondary backoff once but uses the rest of the retries to hit the max delay. */ if (!((this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS && this.factory.tryNumber > 4) || (this.factory.retryTestScenario == RequestRetryTestFactory.RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING && this.factory.tryNumber > 2))) { expectedHost = RETRY_TEST_SECONDARY_HOST; } } if (!request.url().getHost().equals(expectedHost)) { throw new IllegalArgumentException("The host does not match the expected host"); } /* This policy will add test headers and query parameters. Ensure they are removed/reset for each retry. The retry policy should be starting with a fresh copy of the request for every try. */ if (request.headers().value(RETRY_TEST_HEADER) != null) { throw new IllegalArgumentException("Headers not reset."); } if ((request.url().getQuery() != null && request.url().getQuery().contains(RETRY_TEST_QUERY_PARAM))) { throw new IllegalArgumentException("Query params not reset."); } if (FlowableUtil.collectBytesInBuffer(request.body()).blockingGet() .compareTo(RETRY_TEST_DEFAULT_DATA) != 0) { throw new IllegalArgumentException(("Body not reset.")); } /* Modify the request as policies downstream of the retry policy are likely to do. These must be reset on each try. 
*/ request.headers().set(RETRY_TEST_HEADER, "testheader"); UrlBuilder builder = UrlBuilder.parse(request.url()); builder.setQueryParameter(RETRY_TEST_QUERY_PARAM, "testquery"); try { request.withUrl(builder.toURL()); } catch (MalformedURLException e) { throw new IllegalArgumentException("The URL has been mangled"); } switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_RETRY_UNTIL_SUCCESS: switch (this.factory.tryNumber) { case 1: /* The timer is set with a timeout on the Single used to make the request. If the single doesn't return success fast enough, it will throw a TimeoutException. We can short circuit the waiting by simply returning an error. We will validate the time parameter later. Here, we just test that a timeout is retried. */ return Single.error(new TimeoutException()); case 2: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 3: return RETRY_TEST_TIMEOUT_ERROR_RESPONSE; case 4: /* By returning 404 when we should be testing against the secondary, we exercise the logic that should prevent further tries to secondary when the secondary evidently doesn't have the data. 
*/ return RETRY_TEST_NOT_FOUND_RESPONSE; case 5: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 6: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued trying after success."); } case RETRY_TEST_SCENARIO_RETRY_UNTIL_MAX_RETRIES: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case RETRY_TEST_SCENARIO_NON_RETRYABLE: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_NON_RETRYABLE_ERROR; default: throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NON_RETRYABLE_SECONDARY: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return RETRY_TEST_NON_RETRYABLE_ERROR; default: throw new IllegalArgumentException("Continued trying after non retryable error."); } case RETRY_TEST_SCENARIO_NETWORK_ERROR: switch (this.factory.tryNumber) { case 1: case 2: return Single.error(new IOException()); case 3: return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Continued retrying after success."); } case RETRY_TEST_SCENARIO_TRY_TIMEOUT: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() + 1, TimeUnit.SECONDS); case 2: return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() + 1, TimeUnit.SECONDS); case 3: return RETRY_TEST_OK_RESPONSE.delay(options.tryTimeout() - 1, TimeUnit.SECONDS); default: throw new IllegalArgumentException("Continued retrying after success"); } case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: /* Calculation for secondary is always the same, so we don't need to keep testing it. Not trying the secondary any more will also speed up the test. 
*/ return testDelayBounds(1, false, RETRY_TEST_NOT_FOUND_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: return testDelayBounds(3, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 5: /* With the current configuration in RetryTest, the maxRetryDelay should be reached upon the fourth try to the primary. */ return testMaxDelayBounds(RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 6: return testMaxDelayBounds(RETRY_TEST_OK_RESPONSE); default: throw new IllegalArgumentException("Max retries exceeded/continued retrying after success"); } case RETRY_TEST_SCENARIO_FIXED_TIMING: switch (this.factory.tryNumber) { case 1: this.factory.time = OffsetDateTime.now(); return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return testDelayBounds(1, false, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 3: return testDelayBounds(2, true, RETRY_TEST_TEMPORARY_ERROR_RESPONSE); case 4: /* Fixed backoff means it's always the same and we never hit the max, no need to keep testing. */ return RETRY_TEST_OK_RESPONSE; default: throw new IllegalArgumentException("Retries continued after success."); } case RETRY_TEST_SCENARIO_NON_REPLAYABLE_FLOWABLE: switch (this.factory.tryNumber) { case 1: return RETRY_TEST_TEMPORARY_ERROR_RESPONSE; case 2: return Single.error(new UnexpectedLengthException("Unexpected length", 5, 6)); default: throw new IllegalArgumentException("Retries continued on non retryable error."); } default: throw new IllegalArgumentException("Invalid retry test scenario."); } }
class RetryTestPolicy implements RequestPolicy { private RequestRetryTestFactory factory; RetryTestPolicy(RequestRetryTestFactory parent) { this.factory = parent; } @Override /* Calculate the delay in seconds. Round up to ensure we include the maximum value and some offset for the code executing between the original calculation in the retry policy and this check. */ private long calcPrimaryDelay(int tryNumber) { switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: return (long) Math.ceil( ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000); case RETRY_TEST_SCENARIO_FIXED_TIMING: return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000); default: throw new IllegalArgumentException("Invalid test scenario"); } } private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 500, ChronoUnit.MILLIS); } else { return start.plus(1400, ChronoUnit.MILLIS); } } private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 500, ChronoUnit.MILLIS); } else { return start.plus(700, ChronoUnit.MILLIS); } } private Single<HttpResponse> testDelayBounds(int primaryTryNumber, boolean tryingPrimary, Single<HttpResponse> response) { /* We have to return a new Single so that the calculation for time is performed at the correct time, i.e. when the Single is actually subscribed to. This mocks an HttpClient because the requests are made only when the Single is subscribed to, not when all the infrastructure around it is put in place, and we care about the delay before the request itself. */ return new Single<HttpResponse>() { @Override protected void subscribeActual(SingleObserver<? 
super HttpResponse> observer) { try { if (OffsetDateTime.now().isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary)) || OffsetDateTime.now() .isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) { throw new IllegalArgumentException("Delay was not within jitter bounds"); } factory.time = OffsetDateTime.now(); /* We can blocking get because it's not actually an IO call. Everything returned here returns Single.just(response). */ HttpResponse unwrappedResponse = response.blockingGet(); observer.onSuccess(unwrappedResponse); } catch (StorageErrorException | IllegalArgumentException e) { observer.onError(e); } } }; } private Single<HttpResponse> testMaxDelayBounds(Single<HttpResponse> response) { return new Single<HttpResponse>() { @Override protected void subscribeActual(SingleObserver<? super HttpResponse> observer) { try { if (OffsetDateTime.now().isAfter(factory.time.plusSeconds( (long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) + 1)))) { throw new IllegalArgumentException("Max retry delay exceeded"); } else if (OffsetDateTime.now().isBefore(factory.time.plusSeconds( (long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) - 1)))) { throw new IllegalArgumentException("Retry did not delay long enough"); } factory.time = OffsetDateTime.now(); HttpResponse unwrappedResponse = response.blockingGet(); observer.onSuccess(unwrappedResponse); } catch (StorageErrorException | IllegalArgumentException e) { observer.onError(e); } } }; } }
class RetryTestPolicy implements RequestPolicy { private RequestRetryTestFactory factory; RetryTestPolicy(RequestRetryTestFactory parent) { this.factory = parent; } @Override /* Calculate the delay in seconds. Round up to ensure we include the maximum value and some offset for the code executing between the original calculation in the retry policy and this check. */ private long calcPrimaryDelay(int tryNumber) { switch (this.factory.retryTestScenario) { case RETRY_TEST_SCENARIO_EXPONENTIAL_TIMING: return (long) Math.ceil( ((pow(2L, tryNumber - 1) - 1L) * this.factory.options.retryDelayInMs()) / 1000); case RETRY_TEST_SCENARIO_FIXED_TIMING: return (long) Math.ceil(this.factory.options.retryDelayInMs() / 1000); default: throw new IllegalArgumentException("Invalid test scenario"); } } private OffsetDateTime calcUpperBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 + 500, ChronoUnit.MILLIS); } else { return start.plus(1400, ChronoUnit.MILLIS); } } private OffsetDateTime calcLowerBound(OffsetDateTime start, int primaryTryNumber, boolean tryingPrimary) { if (tryingPrimary) { return start.plus(calcPrimaryDelay(primaryTryNumber) * 1000 - 500, ChronoUnit.MILLIS); } else { return start.plus(700, ChronoUnit.MILLIS); } } private Single<HttpResponse> testDelayBounds(int primaryTryNumber, boolean tryingPrimary, Single<HttpResponse> response) { /* We have to return a new Single so that the calculation for time is performed at the correct time, i.e. when the Single is actually subscribed to. This mocks an HttpClient because the requests are made only when the Single is subscribed to, not when all the infrastructure around it is put in place, and we care about the delay before the request itself. */ return new Single<HttpResponse>() { @Override protected void subscribeActual(SingleObserver<? 
super HttpResponse> observer) { try { if (OffsetDateTime.now().isAfter(calcUpperBound(factory.time, primaryTryNumber, tryingPrimary)) || OffsetDateTime.now() .isBefore(calcLowerBound(factory.time, primaryTryNumber, tryingPrimary))) { throw new IllegalArgumentException("Delay was not within jitter bounds"); } factory.time = OffsetDateTime.now(); /* We can blocking get because it's not actually an IO call. Everything returned here returns Single.just(response). */ HttpResponse unwrappedResponse = response.blockingGet(); observer.onSuccess(unwrappedResponse); } catch (StorageErrorException | IllegalArgumentException e) { observer.onError(e); } } }; } private Single<HttpResponse> testMaxDelayBounds(Single<HttpResponse> response) { return new Single<HttpResponse>() { @Override protected void subscribeActual(SingleObserver<? super HttpResponse> observer) { try { if (OffsetDateTime.now().isAfter(factory.time.plusSeconds( (long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) + 1)))) { throw new IllegalArgumentException("Max retry delay exceeded"); } else if (OffsetDateTime.now().isBefore(factory.time.plusSeconds( (long) Math.ceil((factory.options.maxRetryDelayInMs() / 1000) - 1)))) { throw new IllegalArgumentException("Retry did not delay long enough"); } factory.time = OffsetDateTime.now(); HttpResponse unwrappedResponse = response.blockingGet(); observer.onSuccess(unwrappedResponse); } catch (StorageErrorException | IllegalArgumentException e) { observer.onError(e); } } }; } }
both tests are tested in test cases: (1) isRetriable: https://github.com/Azure/azure-sdk-for-java/pull/3818/files#diff-88f19a64404192ec0b032ec6f648b628R57 (2) Excess max retry: https://github.com/Azure/azure-sdk-for-java/pull/3818/files#diff-88f19a64404192ec0b032ec6f648b628R72
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (lastException == null || !(lastException instanceof AmqpException)) { return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
if (lastException == null || !(lastException instanceof AmqpException)) {
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
class Retry { public static final Retry NO_RETRY = new RetryExponential(Duration.ofSeconds(0), Duration.ofSeconds(0), 0); private AtomicInteger retryCount = new AtomicInteger(0); /** * Check if the existing exception is a retryable exception. * * @param exception A exception that was observed for the operation to be retried. * @return true if the exception is a retryable exception, otherwise false. * @throws IllegalArgumentException when the exception is null. */ public static boolean isRetryableException(Exception exception) { if (exception == null) { throw new IllegalArgumentException("exception cannot be null"); } if (exception instanceof AmqpException) { return ((AmqpException) exception).isTransient(); } return false; } /** * Get 'NO_RETRY' of current. * * @return Retry 'NO_RETRY'. */ public static Retry getNoRetry() { return Retry.NO_RETRY; } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefault() { return new RetryExponential( ClientConstants.DEFAULT_RETRY_MIN_BACKOFF, ClientConstants.DEFAULT_RETRY_MAX_BACKOFF, ClientConstants.DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. 
*/ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration onGetNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
Is it possible that the environment variable is not set? ``` if (ImplUtils.IsNullOrEmpty(connStr)) { // throw here. } ```
public EventHubClient build() { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.credentials == null) { String connStr = this.configuration.get(AZURE_EVENT_HUB_CONNECTION_STRING); this.credentials = new CredentialInfo(connStr); } if (this.duration == null) { this.duration = Duration.ofSeconds(5); } if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } return new EventHubClient(); }
String connStr = this.configuration.get(AZURE_EVENT_HUB_CONNECTION_STRING);
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUB_CONNECTION_STRING = "AZURE_EVENT_HUB_CONNECTION_STRING"; private CredentialInfo credentials; private TransportType transport; private Duration duration; private Scheduler scheduler; private ProxyConfiguration proxyConfiguration; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * * @return A new {@link EventHubClient} instance. */ /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param duration Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration duration) { this.duration = duration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxy(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retryPolicy The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = null; if (this.proxyConfiguration != null) { authentication = this.proxyConfiguration.authentication(); } authentication = authentication == null ? 
ProxyAuthenticationType.NONE : authentication; String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); String host; Integer port; Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. 
*/ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } String host = hostPort[0]; Integer port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
There is a default in ClientConstants.OperationTimeout. I believe it's 1 minute.
public EventHubClient build() { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.credentials == null) { String connStr = this.configuration.get(AZURE_EVENT_HUB_CONNECTION_STRING); this.credentials = new CredentialInfo(connStr); } if (this.duration == null) { this.duration = Duration.ofSeconds(5); } if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } return new EventHubClient(); }
this.duration = Duration.ofSeconds(5);
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUB_CONNECTION_STRING = "AZURE_EVENT_HUB_CONNECTION_STRING"; private CredentialInfo credentials; private TransportType transport; private Duration duration; private Scheduler scheduler; private ProxyConfiguration proxyConfiguration; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * * @return A new {@link EventHubClient} instance. */ /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param duration Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration duration) { this.duration = duration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxy(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retryPolicy The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = null; if (this.proxyConfiguration != null) { authentication = this.proxyConfiguration.authentication(); } authentication = authentication == null ? 
ProxyAuthenticationType.NONE : authentication; String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); String host; Integer port; Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. 
*/ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } String host = hostPort[0]; Integer port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
```java ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (this.proxyConfiguration != null) { authentication = this.proxyConfiguration.authentication(); } ```
private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = null; if (this.proxyConfiguration != null) { authentication = this.proxyConfiguration.authentication(); } authentication = authentication == null ? ProxyAuthenticationType.NONE : authentication; String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); String host; Integer port; Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); host = hostPort[0]; port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); }
ProxyAuthenticationType authentication = null;
private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } String host = hostPort[0]; Integer port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUB_CONNECTION_STRING = "AZURE_EVENT_HUB_CONNECTION_STRING"; private CredentialInfo credentials; private TransportType transport; private Duration duration; private Scheduler scheduler; private ProxyConfiguration proxyConfiguration; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * * @return A new {@link EventHubClient} instance. */ public EventHubClient build() { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.credentials == null) { String connStr = this.configuration.get(AZURE_EVENT_HUB_CONNECTION_STRING); this.credentials = new CredentialInfo(connStr); } if (this.duration == null) { this.duration = Duration.ofSeconds(5); } if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } return new EventHubClient(); } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param duration Duration for timeout. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration duration) { this.duration = duration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxy(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retryPolicy The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. */ public EventHubClient build() { configuration = configuration == null ? 
ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); } }
In general, move your public methods to the top and your private methods to the bottom. Users will interact with the public methods first, before looking at these private helper methods.
private static URI getURI(String endpointFormat, String namespaceName, String domainName) { try { return new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName)); } catch (URISyntaxException exception) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid namespace name: %s", namespaceName), exception); } }
return new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName));
private static URI getURI(String endpointFormat, String namespaceName, String domainName) { try { return new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName)); } catch (URISyntaxException exception) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid namespace name: %s", namespaceName), exception); } }
class EventHubClientBuilderTest { private static final String END_POINT_FORMAT = "sb: private static final String NAMESPACE_NAME = "dummyNamespaceName"; private static final String DEFAULT_DOMAIN_NAME = "servicebus.windows.net/"; private static final String SHARED_ACCESS_KEY_NAME = "dummySasKeyName"; private static final String SHARED_ACCESS_KEY = "dummySasKey"; private static final String END_POINT_STR = getURI(END_POINT_FORMAT, NAMESPACE_NAME, DEFAULT_DOMAIN_NAME).toString(); private static final String PROXY_HOST = "/127.0.0.1"; private static final String PROXY_PORT = "3128"; private static final String correctConnStr = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s", END_POINT_STR, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY); private static Proxy proxyAddress = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, Integer.parseInt(PROXY_PORT))); private static final CredentialInfo credentialInfo = new CredentialInfo(correctConnStr); @Test(expected=IllegalArgumentException.class) public void missingConnStrBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.build(); } @Test public void defaultProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.credentials(this.credentialInfo).build(); } @Test public void customNoneProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); ProxyConfiguration proxyConfig = new ProxyConfiguration(ProxyAuthenticationType.NONE, this.proxyAddress, null, null); builder.credentials(this.credentialInfo) .proxy(proxyConfig) .build(); } }
class EventHubClientBuilderTest { private static final String NAMESPACE_NAME = "dummyNamespaceName"; private static final String DEFAULT_DOMAIN_NAME = "servicebus.windows.net/"; private static final String ENTITY_PATH = "dummyEntityPath"; private static final String SHARED_ACCESS_KEY_NAME = "dummySasKeyName"; private static final String SHARED_ACCESS_KEY = "dummySasKey"; private static final String ENDPOINT = getURI(ClientConstants.ENDPOINT_FORMAT, NAMESPACE_NAME, DEFAULT_DOMAIN_NAME).toString(); private static final String PROXY_HOST = "127.0.0.1"; private static final String PROXY_PORT = "3128"; private static final String CORRECT_CONNECTION_STRING = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s;EntityPath=%s", ENDPOINT, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY, ENTITY_PATH); private static Proxy proxyAddress = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, Integer.parseInt(PROXY_PORT))); private static final CredentialInfo VALID_CREDENTIAL_INFO = CredentialInfo.from(CORRECT_CONNECTION_STRING); @Test(expected = IllegalArgumentException.class) public void missingConnStrBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.build(); } @Test public void defaultProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.credentials(VALID_CREDENTIAL_INFO).build(); } @Test public void customNoneProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); ProxyConfiguration proxyConfig = new ProxyConfiguration(ProxyAuthenticationType.NONE, this.proxyAddress, null, null); builder.credentials(VALID_CREDENTIAL_INFO) .proxyConfiguration(proxyConfig) .build(); } }
What are we expecting here? The expected components of the connection string should be outlined in the method docs.
public static CredentialInfo from(String connectionString) { CredentialInfo credentialInfo = new CredentialInfo(); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException(connectionString); } String[] args = connectionString.split(";"); if (args.length < 3) { throw new IllegalArgumentException("invalid connection string segment count"); } for (String arg : args) { String segment = arg.trim(); if (segment.startsWith(ENDPOINT)) { try { credentialInfo.endpoint = new URI(segment.substring(ENDPOINT.length())); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", segment), e); } } else if (segment.startsWith(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = segment.substring(SHARED_ACCESS_KEY_NAME.length()); } else if (segment.startsWith(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = segment.substring(SHARED_ACCESS_KEY.length()); } else if (segment.startsWith(ENTITY_PATH)) { credentialInfo.eventHubName = segment.substring(ENTITY_PATH.length()); } } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null) { throw new IllegalArgumentException("Could not parse 'connectionString'." + " Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey}'. Actual:" + connectionString); } return credentialInfo; }
throw new IllegalArgumentException("invalid connection string segment count");
public static CredentialInfo from(String connectionString) { return createCredentialInfo(connectionString, null); }
class CredentialInfo { private static final String ENDPOINT = "Endpoint="; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName="; private static final String SHARED_ACCESS_KEY = "SharedAccessKey="; private static final String ENTITY_PATH = "EntityPath="; private static URI endpoint; private static String sharedAccessKeyName; private static String sharedAccessKey; private static String eventHubName; private CredentialInfo() { } /** * Create a CredentialInfo object from connection string. * * @param connectionString Connection String * @return CredentialInfo * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubName() { return eventHubName; } }
class CredentialInfo { private static final String TOKEN_VALUE_SEPERATOR = "="; private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String ENTITY_PATH = "EntityPath"; private URI endpoint; private String sharedAccessKeyName; private String sharedAccessKey; private String eventHubPath; private CredentialInfo() { } /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, include {@code EntityPath}. * Such as the connection string from 'SAS Policy: root' which contains {@code EntityPath}, well known as 'EventHub Path'. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName, SharedAccessKey and EntityPath. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, exclude {@code EntityPath}. * Such as the connection string from 'SAS Policy: RootManageSharedAccessKey', which doesn't contain {@code EntityPath}. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName and SharedAccessKey. * @param eventHubPath EventHub Name that used in Azure Portal. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. 
*/ public static CredentialInfo from(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("EventHub path is null or empty"); } return createCredentialInfo(connectionString, eventHubPath); } URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubPath() { return eventHubPath; } private static CredentialInfo createCredentialInfo(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty"); } CredentialInfo credentialInfo = new CredentialInfo(); String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); for (String tokenValuePair : tokenValuePairs) { String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPERATOR); if (pair.length != 2) { throw new IllegalArgumentException(String.format(Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } String pairKey = pair[0].trim(); String pairValue = pair[1].trim(); if (pairKey.equalsIgnoreCase(ENDPOINT)) { try { credentialInfo.endpoint = new URI(pairValue); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = pairValue; } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = pairValue; } else if (pairKey.equalsIgnoreCase(ENTITY_PATH)) { credentialInfo.eventHubPath = pairValue; } } if (!ImplUtils.isNullOrEmpty(eventHubPath)) { credentialInfo.eventHubPath = eventHubPath; } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null || credentialInfo.eventHubPath == null) { throw new 
IllegalArgumentException("Could not parse 'connectionString'." + "Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}'. Actual:" + connectionString); } return credentialInfo; } }
This should be a descriptive error message, not the variable itself, which at this point will be null or empty.
public static CredentialInfo from(String connectionString) { CredentialInfo credentialInfo = new CredentialInfo(); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException(connectionString); } String[] args = connectionString.split(";"); if (args.length < 3) { throw new IllegalArgumentException("invalid connection string segment count"); } for (String arg : args) { String segment = arg.trim(); if (segment.startsWith(ENDPOINT)) { try { credentialInfo.endpoint = new URI(segment.substring(ENDPOINT.length())); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", segment), e); } } else if (segment.startsWith(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = segment.substring(SHARED_ACCESS_KEY_NAME.length()); } else if (segment.startsWith(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = segment.substring(SHARED_ACCESS_KEY.length()); } else if (segment.startsWith(ENTITY_PATH)) { credentialInfo.eventHubName = segment.substring(ENTITY_PATH.length()); } } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null) { throw new IllegalArgumentException("Could not parse 'connectionString'." + " Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey}'. Actual:" + connectionString); } return credentialInfo; }
throw new IllegalArgumentException(connectionString);
public static CredentialInfo from(String connectionString) { return createCredentialInfo(connectionString, null); }
class CredentialInfo { private static final String ENDPOINT = "Endpoint="; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName="; private static final String SHARED_ACCESS_KEY = "SharedAccessKey="; private static final String ENTITY_PATH = "EntityPath="; private static URI endpoint; private static String sharedAccessKeyName; private static String sharedAccessKey; private static String eventHubName; private CredentialInfo() { } /** * Create a CredentialInfo object from connection string. * * @param connectionString Connection String * @return CredentialInfo * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubName() { return eventHubName; } }
class CredentialInfo { private static final String TOKEN_VALUE_SEPERATOR = "="; private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String ENTITY_PATH = "EntityPath"; private URI endpoint; private String sharedAccessKeyName; private String sharedAccessKey; private String eventHubPath; private CredentialInfo() { } /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, include {@code EntityPath}. * Such as the connection string from 'SAS Policy: root' which contains {@code EntityPath}, well known as 'EventHub Path'. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName, SharedAccessKey and EntityPath. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, exclude {@code EntityPath}. * Such as the connection string from 'SAS Policy: RootManageSharedAccessKey', which doesn't contain {@code EntityPath}. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName and SharedAccessKey. * @param eventHubPath EventHub Name that used in Azure Portal. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. 
*/ public static CredentialInfo from(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("EventHub path is null or empty"); } return createCredentialInfo(connectionString, eventHubPath); } URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubPath() { return eventHubPath; } private static CredentialInfo createCredentialInfo(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty"); } CredentialInfo credentialInfo = new CredentialInfo(); String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); for (String tokenValuePair : tokenValuePairs) { String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPERATOR); if (pair.length != 2) { throw new IllegalArgumentException(String.format(Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } String pairKey = pair[0].trim(); String pairValue = pair[1].trim(); if (pairKey.equalsIgnoreCase(ENDPOINT)) { try { credentialInfo.endpoint = new URI(pairValue); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = pairValue; } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = pairValue; } else if (pairKey.equalsIgnoreCase(ENTITY_PATH)) { credentialInfo.eventHubPath = pairValue; } } if (!ImplUtils.isNullOrEmpty(eventHubPath)) { credentialInfo.eventHubPath = eventHubPath; } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null || credentialInfo.eventHubPath == null) { throw new 
IllegalArgumentException("Could not parse 'connectionString'." + "Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}'. Actual:" + connectionString); } return credentialInfo; } }
The .NET implementation does an "ordinal ignore case" comparison. We should apply a case-insensitive comparison to these keys as well.
public static CredentialInfo from(String connectionString) { CredentialInfo credentialInfo = new CredentialInfo(); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException(connectionString); } String[] args = connectionString.split(";"); if (args.length < 3) { throw new IllegalArgumentException("invalid connection string segment count"); } for (String arg : args) { String segment = arg.trim(); if (segment.startsWith(ENDPOINT)) { try { credentialInfo.endpoint = new URI(segment.substring(ENDPOINT.length())); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", segment), e); } } else if (segment.startsWith(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = segment.substring(SHARED_ACCESS_KEY_NAME.length()); } else if (segment.startsWith(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = segment.substring(SHARED_ACCESS_KEY.length()); } else if (segment.startsWith(ENTITY_PATH)) { credentialInfo.eventHubName = segment.substring(ENTITY_PATH.length()); } } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null) { throw new IllegalArgumentException("Could not parse 'connectionString'." + " Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey}'. Actual:" + connectionString); } return credentialInfo; }
if (segment.startsWith(ENDPOINT)) {
public static CredentialInfo from(String connectionString) { return createCredentialInfo(connectionString, null); }
class CredentialInfo { private static final String ENDPOINT = "Endpoint="; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName="; private static final String SHARED_ACCESS_KEY = "SharedAccessKey="; private static final String ENTITY_PATH = "EntityPath="; private static URI endpoint; private static String sharedAccessKeyName; private static String sharedAccessKey; private static String eventHubName; private CredentialInfo() { } /** * Create a CredentialInfo object from connection string. * * @param connectionString Connection String * @return CredentialInfo * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubName() { return eventHubName; } }
class CredentialInfo { private static final String TOKEN_VALUE_SEPERATOR = "="; private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String ENTITY_PATH = "EntityPath"; private URI endpoint; private String sharedAccessKeyName; private String sharedAccessKey; private String eventHubPath; private CredentialInfo() { } /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, include {@code EntityPath}. * Such as the connection string from 'SAS Policy: root' which contains {@code EntityPath}, well known as 'EventHub Path'. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName, SharedAccessKey and EntityPath. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, exclude {@code EntityPath}. * Such as the connection string from 'SAS Policy: RootManageSharedAccessKey', which doesn't contain {@code EntityPath}. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName and SharedAccessKey. * @param eventHubPath EventHub Name that used in Azure Portal. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. 
*/ public static CredentialInfo from(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("EventHub path is null or empty"); } return createCredentialInfo(connectionString, eventHubPath); } URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubPath() { return eventHubPath; } private static CredentialInfo createCredentialInfo(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty"); } CredentialInfo credentialInfo = new CredentialInfo(); String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); for (String tokenValuePair : tokenValuePairs) { String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPERATOR); if (pair.length != 2) { throw new IllegalArgumentException(String.format(Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } String pairKey = pair[0].trim(); String pairValue = pair[1].trim(); if (pairKey.equalsIgnoreCase(ENDPOINT)) { try { credentialInfo.endpoint = new URI(pairValue); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = pairValue; } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = pairValue; } else if (pairKey.equalsIgnoreCase(ENTITY_PATH)) { credentialInfo.eventHubPath = pairValue; } } if (!ImplUtils.isNullOrEmpty(eventHubPath)) { credentialInfo.eventHubPath = eventHubPath; } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null || credentialInfo.eventHubPath == null) { throw new 
IllegalArgumentException("Could not parse 'connectionString'." + "Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}'. Actual:" + connectionString); } return credentialInfo; } }
Use the error message from: https://github.com/Azure/azure-sdk-for-java/pull/3791/files#diff-a814032d30ff1457abeea40ac2ea360aR64
public static CredentialInfo from(String connectionString) { CredentialInfo credentialInfo = new CredentialInfo(); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException(connectionString); } String[] args = connectionString.split(";"); if (args.length < 3) { throw new IllegalArgumentException("invalid connection string segment count"); } for (String arg : args) { String segment = arg.trim(); if (segment.startsWith(ENDPOINT)) { try { credentialInfo.endpoint = new URI(segment.substring(ENDPOINT.length())); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", segment), e); } } else if (segment.startsWith(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = segment.substring(SHARED_ACCESS_KEY_NAME.length()); } else if (segment.startsWith(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = segment.substring(SHARED_ACCESS_KEY.length()); } else if (segment.startsWith(ENTITY_PATH)) { credentialInfo.eventHubName = segment.substring(ENTITY_PATH.length()); } } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null) { throw new IllegalArgumentException("Could not parse 'connectionString'." + " Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey}'. Actual:" + connectionString); } return credentialInfo; }
throw new IllegalArgumentException("invalid connection string segment count");
public static CredentialInfo from(String connectionString) { return createCredentialInfo(connectionString, null); }
class CredentialInfo { private static final String ENDPOINT = "Endpoint="; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName="; private static final String SHARED_ACCESS_KEY = "SharedAccessKey="; private static final String ENTITY_PATH = "EntityPath="; private static URI endpoint; private static String sharedAccessKeyName; private static String sharedAccessKey; private static String eventHubName; private CredentialInfo() { } /** * Create a CredentialInfo object from connection string. * * @param connectionString Connection String * @return CredentialInfo * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubName() { return eventHubName; } }
class CredentialInfo { private static final String TOKEN_VALUE_SEPERATOR = "="; private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String ENTITY_PATH = "EntityPath"; private URI endpoint; private String sharedAccessKeyName; private String sharedAccessKey; private String eventHubPath; private CredentialInfo() { } /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, include {@code EntityPath}. * Such as the connection string from 'SAS Policy: root' which contains {@code EntityPath}, well known as 'EventHub Path'. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName, SharedAccessKey and EntityPath. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, exclude {@code EntityPath}. * Such as the connection string from 'SAS Policy: RootManageSharedAccessKey', which doesn't contain {@code EntityPath}. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName and SharedAccessKey. * @param eventHubPath EventHub Name that used in Azure Portal. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. 
*/ public static CredentialInfo from(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("EventHub path is null or empty"); } return createCredentialInfo(connectionString, eventHubPath); } URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubPath() { return eventHubPath; } private static CredentialInfo createCredentialInfo(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty"); } CredentialInfo credentialInfo = new CredentialInfo(); String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); for (String tokenValuePair : tokenValuePairs) { String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPERATOR); if (pair.length != 2) { throw new IllegalArgumentException(String.format(Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } String pairKey = pair[0].trim(); String pairValue = pair[1].trim(); if (pairKey.equalsIgnoreCase(ENDPOINT)) { try { credentialInfo.endpoint = new URI(pairValue); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = pairValue; } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = pairValue; } else if (pairKey.equalsIgnoreCase(ENTITY_PATH)) { credentialInfo.eventHubPath = pairValue; } } if (!ImplUtils.isNullOrEmpty(eventHubPath)) { credentialInfo.eventHubPath = eventHubPath; } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null || credentialInfo.eventHubPath == null) { throw new 
IllegalArgumentException("Could not parse 'connectionString'." + "Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}'. Actual:" + connectionString); } return credentialInfo; } }
nit: rename `connStr` to `connectionString`; the abbreviation is hard to read. Also, the exception should carry a descriptive message rather than the variable itself.
public EventHubClient build() throws IllegalArgumentException { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.credentials == null) { String connStr = this.configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connStr)) { throw new IllegalArgumentException(connStr); } this.credentials = CredentialInfo.from(connStr); } if (this.duration == null) { this.duration = Duration.ofSeconds(60); } if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } return new EventHubClient(); }
throw new IllegalArgumentException(connStr);
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private TransportType transport; private Duration duration; private Scheduler scheduler; private ProxyConfiguration proxyConfiguration; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connStr' is {@code null} or empty. */ /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param duration Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration duration) { this.duration = duration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxy(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retryPolicy The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (this.proxyConfiguration != null) { authentication = this.proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); String host; Integer port; Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } host = hostPort[0]; port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); if (authentication == ProxyAuthenticationType.BASIC || authentication == ProxyAuthenticationType.DIGEST) { if (username == null) { throw new IllegalArgumentException(String.format("Proxy Authenticaiton is %s, username is missing", authentication)); } if (password == null) { throw new IllegalArgumentException(String.format("Proxy Authenticaiton is %s, password is missing", authentication)); } } return new ProxyConfiguration(authentication, proxy, username, password); } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. 
*/ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } String host = hostPort[0]; Integer port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
These variables (host and port) can live inside the `if (proxyAddress != null) {` block; they aren't used outside of that scope. (`proxy` still needs the wider scope, since it is passed to the ProxyConfiguration constructor.)
/**
 * Builds the {@link ProxyConfiguration} to use, based on the {@code HTTP_PROXY}, {@code PROXY_USERNAME},
 * and {@code PROXY_PASSWORD} entries of the given configuration store. The authentication type is taken
 * from the proxy configuration already set on this builder, if any.
 *
 * @param configuration The configuration store to read proxy settings from.
 * @return A new {@link ProxyConfiguration} with the resolved proxy address and credentials.
 * @throws IllegalArgumentException when {@code HTTP_PROXY} cannot be parsed as {@code host:port}, or when
 *     BASIC/DIGEST authentication is requested but the username or password is missing.
 */
private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) {
    ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
    if (this.proxyConfiguration != null) {
        authentication = this.proxyConfiguration.authentication();
    }

    final String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
    Proxy proxy = null;

    if (proxyAddress != null) {
        final String[] hostPort = proxyAddress.split(":");
        if (hostPort.length < 2) {
            throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
        }

        // host and port are only needed to build the Proxy, so they are scoped to this block.
        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
    }

    final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
    final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

    if (authentication == ProxyAuthenticationType.BASIC || authentication == ProxyAuthenticationType.DIGEST) {
        // Typo fix in the messages below: "Authenticaiton" -> "Authentication".
        if (username == null) {
            throw new IllegalArgumentException(String.format("Proxy Authentication is %s, username is missing", authentication));
        }
        if (password == null) {
            throw new IllegalArgumentException(String.format("Proxy Authentication is %s, password is missing", authentication));
        }
    }

    return new ProxyConfiguration(authentication, proxy, username, password);
}
String host;
/**
 * Builds the {@link ProxyConfiguration} to use, based on the {@code HTTP_PROXY}, {@code PROXY_USERNAME},
 * and {@code PROXY_PASSWORD} entries of the given configuration store. The authentication type is taken
 * from the proxy configuration already set on this builder, if any. No username/password validation is
 * performed here; per the accompanying review note, the proxy handler presumably falls back to system
 * defaults when BASIC/DIGEST is requested without explicit credentials — TODO confirm.
 *
 * @param configuration The configuration store to read proxy settings from.
 * @return A new {@link ProxyConfiguration} with the resolved proxy address and credentials.
 * @throws IllegalArgumentException when {@code HTTP_PROXY} cannot be parsed as {@code host:port}.
 */
private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) {
    ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
    if (proxyConfiguration != null) {
        authentication = proxyConfiguration.authentication();
    }

    final String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
    Proxy proxy = null;

    if (proxyAddress != null) {
        final String[] hostPort = proxyAddress.split(":");
        if (hostPort.length < 2) {
            throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
        }

        final String host = hostPort[0];
        // Use the primitive type; the boxed Integer added nothing but an autobox.
        final int port = Integer.parseInt(hostPort[1]);
        proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
    }

    final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
    final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

    return new ProxyConfiguration(authentication, proxy, username, password);
}
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private TransportType transport; private Duration duration; private Scheduler scheduler; private ProxyConfiguration proxyConfiguration; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connStr' is {@code null} or empty. */ public EventHubClient build() throws IllegalArgumentException { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.credentials == null) { String connStr = this.configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connStr)) { throw new IllegalArgumentException(connStr); } this.credentials = CredentialInfo.from(connStr); } if (this.duration == null) { this.duration = Duration.ofSeconds(60); } if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } return new EventHubClient(); } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param duration Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration duration) { this.duration = duration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxy(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retryPolicy The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. */ public EventHubClient build() { configuration = configuration == null ? 
ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); } }
This if check can be removed. In the case that they want to use BASIC or DIGEST and do not pass in a username or password, the WebProxyHandler will fetch the system defaults.
/**
 * Builds the {@link ProxyConfiguration} to use, based on the {@code HTTP_PROXY}, {@code PROXY_USERNAME},
 * and {@code PROXY_PASSWORD} entries of the given configuration store. The authentication type is taken
 * from the proxy configuration already set on this builder, if any.
 *
 * @param configuration The configuration store to read proxy settings from.
 * @return A new {@link ProxyConfiguration} with the resolved proxy address and credentials.
 * @throws IllegalArgumentException when {@code HTTP_PROXY} cannot be parsed as {@code host:port}, or when
 *     BASIC/DIGEST authentication is requested but the username or password is missing.
 */
private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) {
    ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
    if (this.proxyConfiguration != null) {
        authentication = this.proxyConfiguration.authentication();
    }

    final String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
    Proxy proxy = null;

    if (proxyAddress != null) {
        final String[] hostPort = proxyAddress.split(":");
        if (hostPort.length < 2) {
            throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
        }

        // host and port are only needed to build the Proxy, so they are scoped to this block.
        final String host = hostPort[0];
        final int port = Integer.parseInt(hostPort[1]);
        proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
    }

    final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
    final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

    if (authentication == ProxyAuthenticationType.BASIC || authentication == ProxyAuthenticationType.DIGEST) {
        // Typo fix in the messages below: "Authenticaiton" -> "Authentication".
        if (username == null) {
            throw new IllegalArgumentException(String.format("Proxy Authentication is %s, username is missing", authentication));
        }
        if (password == null) {
            throw new IllegalArgumentException(String.format("Proxy Authentication is %s, password is missing", authentication));
        }
    }

    return new ProxyConfiguration(authentication, proxy, username, password);
}
if (authentication == ProxyAuthenticationType.BASIC || authentication == ProxyAuthenticationType.DIGEST) {
/**
 * Builds the {@link ProxyConfiguration} to use, based on the {@code HTTP_PROXY}, {@code PROXY_USERNAME},
 * and {@code PROXY_PASSWORD} entries of the given configuration store. The authentication type is taken
 * from the proxy configuration already set on this builder, if any. No username/password validation is
 * performed here; per the accompanying review note, the proxy handler presumably falls back to system
 * defaults when BASIC/DIGEST is requested without explicit credentials — TODO confirm.
 *
 * @param configuration The configuration store to read proxy settings from.
 * @return A new {@link ProxyConfiguration} with the resolved proxy address and credentials.
 * @throws IllegalArgumentException when {@code HTTP_PROXY} cannot be parsed as {@code host:port}.
 */
private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) {
    ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
    if (proxyConfiguration != null) {
        authentication = proxyConfiguration.authentication();
    }

    final String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
    Proxy proxy = null;

    if (proxyAddress != null) {
        final String[] hostPort = proxyAddress.split(":");
        if (hostPort.length < 2) {
            throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
        }

        final String host = hostPort[0];
        // Use the primitive type; the boxed Integer added nothing but an autobox.
        final int port = Integer.parseInt(hostPort[1]);
        proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
    }

    final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
    final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

    return new ProxyConfiguration(authentication, proxy, username, password);
}
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private TransportType transport; private Duration duration; private Scheduler scheduler; private ProxyConfiguration proxyConfiguration; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connStr' is {@code null} or empty. */ public EventHubClient build() throws IllegalArgumentException { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.credentials == null) { String connStr = this.configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connStr)) { throw new IllegalArgumentException(connStr); } this.credentials = CredentialInfo.from(connStr); } if (this.duration == null) { this.duration = Duration.ofSeconds(60); } if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } return new EventHubClient(); } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param duration Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration duration) { this.duration = duration; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxy(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retryPolicy The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. */ public EventHubClient build() { configuration = configuration == null ? 
ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); } }
I think a smarter way to do this comparison is to do an additional split here on "=", and then compare the key of that pair rather than the current approach. Having all these .toLowerCase() calls makes it unreadable, when we could be using .equalsIgnoreCase(). If you need a reference, look at the .NET ConnectionString.
public static CredentialInfo from(String connectionString) { CredentialInfo credentialInfo = new CredentialInfo(); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("connection string is null or empty."); } String[] args = connectionString.split(";"); for (String arg : args) { String segment = arg.trim(); String lowerCaseSegment = segment.toLowerCase(Locale.ENGLISH); if (lowerCaseSegment.startsWith(ENDPOINT.toLowerCase(Locale.ENGLISH))) { try { credentialInfo.endpoint = new URI(segment.substring(ENDPOINT.length())); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", segment), e); } } else if (lowerCaseSegment.startsWith(SHARED_ACCESS_KEY_NAME.toLowerCase(Locale.ENGLISH))) { credentialInfo.sharedAccessKeyName = segment.substring(SHARED_ACCESS_KEY_NAME.length()); } else if (lowerCaseSegment.startsWith(SHARED_ACCESS_KEY.toLowerCase(Locale.ENGLISH))) { credentialInfo.sharedAccessKey = segment.substring(SHARED_ACCESS_KEY.length()); } else if (lowerCaseSegment.startsWith(ENTITY_PATH.toLowerCase(Locale.ENGLISH))) { credentialInfo.eventHubName = segment.substring(ENTITY_PATH.length()); } } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null || credentialInfo.eventHubName == null) { throw new IllegalArgumentException("Could not parse 'connectionString'." + "Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={entityPath}'. Actual:" + connectionString); } return credentialInfo; }
if (lowerCaseSegment.startsWith(ENDPOINT.toLowerCase(Locale.ENGLISH))) {
public static CredentialInfo from(String connectionString) { return createCredentialInfo(connectionString, null); }
class CredentialInfo { private static final String ENDPOINT = "Endpoint="; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName="; private static final String SHARED_ACCESS_KEY = "SharedAccessKey="; private static final String ENTITY_PATH = "EntityPath="; private URI endpoint; private String sharedAccessKeyName; private String sharedAccessKey; private String eventHubName; private CredentialInfo() { } /** * Create a CredentialInfo object from connection string. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName and SharedAccessKey * @return CredentialInfo * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubName() { return eventHubName; } }
class CredentialInfo { private static final String TOKEN_VALUE_SEPERATOR = "="; private static final String TOKEN_VALUE_PAIR_DELIMITER = ";"; private static final String ENDPOINT = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY = "SharedAccessKey"; private static final String ENTITY_PATH = "EntityPath"; private URI endpoint; private String sharedAccessKeyName; private String sharedAccessKey; private String eventHubPath; private CredentialInfo() { } /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, include {@code EntityPath}. * Such as the connection string from 'SAS Policy: root' which contains {@code EntityPath}, well known as 'EventHub Path'. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName, SharedAccessKey and EntityPath. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. */ /** * Create a {@link CredentialInfo} object that maps all key-value pairs of ConnectionString, exclude {@code EntityPath}. * Such as the connection string from 'SAS Policy: RootManageSharedAccessKey', which doesn't contain {@code EntityPath}. * * @param connectionString Connection String, which should at least include Endpoint, SharedAccessKeyName and SharedAccessKey. * @param eventHubPath EventHub Name that used in Azure Portal. * @return a new created {@link CredentialInfo}. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty, or cannot be translated into an * {@link CredentialInfo}, or have invalid format. 
*/ public static CredentialInfo from(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("EventHub path is null or empty"); } return createCredentialInfo(connectionString, eventHubPath); } URI endpoint() { return this.endpoint; } String sharedAccessKeyName() { return this.sharedAccessKeyName; } String sharedAccessKey() { return sharedAccessKey; } String eventHubPath() { return eventHubPath; } private static CredentialInfo createCredentialInfo(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty"); } CredentialInfo credentialInfo = new CredentialInfo(); String[] tokenValuePairs = connectionString.split(TOKEN_VALUE_PAIR_DELIMITER); for (String tokenValuePair : tokenValuePairs) { String[] pair = tokenValuePair.split(TOKEN_VALUE_SEPERATOR); if (pair.length != 2) { throw new IllegalArgumentException(String.format(Locale.US, "Connection string has invalid key value pair: %s", tokenValuePair)); } String pairKey = pair[0].trim(); String pairValue = pair[1].trim(); if (pairKey.equalsIgnoreCase(ENDPOINT)) { try { credentialInfo.endpoint = new URI(pairValue); } catch (URISyntaxException e) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid endpoint: %s", tokenValuePair), e); } } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME)) { credentialInfo.sharedAccessKeyName = pairValue; } else if (pairKey.equalsIgnoreCase(SHARED_ACCESS_KEY)) { credentialInfo.sharedAccessKey = pairValue; } else if (pairKey.equalsIgnoreCase(ENTITY_PATH)) { credentialInfo.eventHubPath = pairValue; } } if (!ImplUtils.isNullOrEmpty(eventHubPath)) { credentialInfo.eventHubPath = eventHubPath; } if (credentialInfo.endpoint == null || credentialInfo.sharedAccessKeyName == null || credentialInfo.sharedAccessKey == null || credentialInfo.eventHubPath == null) { throw new 
IllegalArgumentException("Could not parse 'connectionString'." + "Expected format: 'Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};" + "SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}'. Actual:" + connectionString); } return credentialInfo; } }
Private method is still up here.
private static URI getURI(String endpointFormat, String namespaceName, String domainName) { try { return new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName)); } catch (URISyntaxException exception) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid namespace name: %s", namespaceName), exception); } }
return new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName));
private static URI getURI(String endpointFormat, String namespaceName, String domainName) { try { return new URI(String.format(Locale.US, endpointFormat, namespaceName, domainName)); } catch (URISyntaxException exception) { throw new IllegalArgumentException(String.format(Locale.US, "Invalid namespace name: %s", namespaceName), exception); } }
class EventHubClientBuilderTest { private static final String END_POINT_FORMAT = "sb: private static final String NAMESPACE_NAME = "dummyNamespaceName"; private static final String DEFAULT_DOMAIN_NAME = "servicebus.windows.net/"; private static final String SHARED_ACCESS_KEY_NAME = "dummySasKeyName"; private static final String SHARED_ACCESS_KEY = "dummySasKey"; private static final String END_POINT_STR = getURI(END_POINT_FORMAT, NAMESPACE_NAME, DEFAULT_DOMAIN_NAME).toString(); private static final String PROXY_HOST = "/127.0.0.1"; private static final String PROXY_PORT = "3128"; private static final String correctConnStr = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s", END_POINT_STR, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY); private static Proxy proxyAddress = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, Integer.parseInt(PROXY_PORT))); private static final CredentialInfo credentialInfo = new CredentialInfo(correctConnStr); @Test(expected=IllegalArgumentException.class) public void missingConnStrBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.build(); } @Test public void defaultProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.credentials(this.credentialInfo).build(); } @Test public void customNoneProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); ProxyConfiguration proxyConfig = new ProxyConfiguration(ProxyAuthenticationType.NONE, this.proxyAddress, null, null); builder.credentials(this.credentialInfo) .proxy(proxyConfig) .build(); } }
class EventHubClientBuilderTest { private static final String NAMESPACE_NAME = "dummyNamespaceName"; private static final String DEFAULT_DOMAIN_NAME = "servicebus.windows.net/"; private static final String ENTITY_PATH = "dummyEntityPath"; private static final String SHARED_ACCESS_KEY_NAME = "dummySasKeyName"; private static final String SHARED_ACCESS_KEY = "dummySasKey"; private static final String ENDPOINT = getURI(ClientConstants.ENDPOINT_FORMAT, NAMESPACE_NAME, DEFAULT_DOMAIN_NAME).toString(); private static final String PROXY_HOST = "127.0.0.1"; private static final String PROXY_PORT = "3128"; private static final String CORRECT_CONNECTION_STRING = String.format("Endpoint=%s;SharedAccessKeyName=%s;SharedAccessKey=%s;EntityPath=%s", ENDPOINT, SHARED_ACCESS_KEY_NAME, SHARED_ACCESS_KEY, ENTITY_PATH); private static Proxy proxyAddress = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(PROXY_HOST, Integer.parseInt(PROXY_PORT))); private static final CredentialInfo VALID_CREDENTIAL_INFO = CredentialInfo.from(CORRECT_CONNECTION_STRING); @Test(expected = IllegalArgumentException.class) public void missingConnStrBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.build(); } @Test public void defaultProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); builder.credentials(VALID_CREDENTIAL_INFO).build(); } @Test public void customNoneProxyConfigurationBuilder() { EventHubClientBuilder builder = new EventHubClientBuilder(); ProxyConfiguration proxyConfig = new ProxyConfiguration(ProxyAuthenticationType.NONE, this.proxyAddress, null, null); builder.credentials(VALID_CREDENTIAL_INFO) .proxyConfiguration(proxyConfig) .build(); } }
nit: You don't need to specify `this` for all your variable accesses in this method. There is no confusion between whether the variable belongs to the class or is scoped locally. imho, it adds extra reading when I already know we are trying to set the variables for the class. In the case of the constructor, where it's common to see the parameter variable the same as the member variable, then I'd have to use `this`.
public EventHubClient build() { this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration; if (this.credentials == null) { String connectionString = this.configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } this.credentials = CredentialInfo.from(connectionString); } if (this.timeout == null) { this.timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (this.retry == null) { this.retry = Retry.getDefaultRetry(); } this.proxyConfiguration = constructDefaultProxyConfiguration(this.configuration); if (this.scheduler == null) { this.scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(this.credentials, this.timeout, tokenProvider, this.transport, this.retry, this.proxyConfiguration, this.scheduler); return new EventHubClient(connectionParameters, provider, handlerProvider); }
this.configuration = this.configuration == null ? ConfigurationManager.getConfiguration().clone() : this.configuration;
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Connection string is null or empty."); } credentials = CredentialInfo.from(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); final SharedAccessSignatureTokenProvider tokenProvider; try { tokenProvider = new SharedAccessSignatureTokenProvider(credentials.sharedAccessKeyName(), credentials.sharedAccessKey()); } catch (NoSuchAlgorithmException | InvalidKeyException e) { throw new AzureException("Could not create token provider."); } if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } ConnectionParameters connectionParameters = new ConnectionParameters(credentials, timeout, tokenProvider, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(connectionParameters, tokenProvider, provider, handlerProvider, scheduler); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { this.transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. 
*/ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (this.proxyConfiguration != null) { authentication = this.proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } String host = hostPort[0]; Integer port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private CredentialInfo credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credentials information from connection string * * @param credentials Credentials for the EventHubClient. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder credentials(CredentialInfo credentials) { this.credentials = credentials; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. 
* @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. * @throws IllegalArgumentException when 'connectionString' is {@code null} or empty. * @throws AzureException If the token provider cannot be created for authorizing requests. 
*/ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } String host = hostPort[0]; Integer port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
Since you're covering this case below, should we consider removing this so that the message is consistent for `null` and `""` ?
public ErrorContext(final Throwable exception, final String namespaceName) { Objects.requireNonNull(exception); if (ImplUtils.isNullOrEmpty(namespaceName)) { throw new IllegalArgumentException("'namespaceName' cannot be null or empty"); } this.namespaceName = namespaceName; this.exception = exception; }
Objects.requireNonNull(exception);
public ErrorContext(final Throwable exception, final String namespaceName) { Objects.requireNonNull(exception); if (ImplUtils.isNullOrEmpty(namespaceName)) { throw new IllegalArgumentException("'namespaceName' cannot be null or empty"); } this.namespaceName = namespaceName; this.exception = exception; }
class ErrorContext implements Serializable { private static final long serialVersionUID = -2819764407122954922L; private final String namespaceName; private final Throwable exception; /** * Creates a new instance with the provided {@code namespaceName}. * * @param exception Exception that caused this error. * @param namespaceName Event Hub namespace of the error context. * @throws IllegalArgumentException when 'namespaceName' is {@code null} or empty. */ /** * Gets the namespace for this error. * * @return The namespace for this error. */ public String namespaceName() { return namespaceName; } /** * Gets the exception wrapped in this context. * * @return The exception that caused the error. */ public Throwable exception() { return exception; } /** * {@inheritDoc} */ @Override public String toString() { return String.format(Locale.US, "NS: %s. Exception: %s", namespaceName, exception); } }
class ErrorContext implements Serializable { private static final long serialVersionUID = -2819764407122954922L; private final String namespaceName; private final Throwable exception; /** * Creates a new instance with the provided {@code namespaceName}. * * @param exception Exception that caused this error. * @param namespaceName Event Hub namespace of the error context. * @throws NullPointerException when {@code exception} is {@code null}. * @throws IllegalArgumentException when {@code namespaceName} is {@code null} or empty. */ /** * Gets the namespace for this error. * * @return The namespace for this error. */ public String namespaceName() { return namespaceName; } /** * Gets the exception wrapped in this context. * * @return The exception that caused the error. */ public Throwable exception() { return exception; } /** * {@inheritDoc} */ @Override public String toString() { return String.format(Locale.US, "NS: %s. Exception: %s", namespaceName, exception); } }
It will throw a NullPointerException if exception == null. I updated the docs to state this.
public ErrorContext(final Throwable exception, final String namespaceName) { Objects.requireNonNull(exception); if (ImplUtils.isNullOrEmpty(namespaceName)) { throw new IllegalArgumentException("'namespaceName' cannot be null or empty"); } this.namespaceName = namespaceName; this.exception = exception; }
Objects.requireNonNull(exception);
public ErrorContext(final Throwable exception, final String namespaceName) { Objects.requireNonNull(exception); if (ImplUtils.isNullOrEmpty(namespaceName)) { throw new IllegalArgumentException("'namespaceName' cannot be null or empty"); } this.namespaceName = namespaceName; this.exception = exception; }
class ErrorContext implements Serializable { private static final long serialVersionUID = -2819764407122954922L; private final String namespaceName; private final Throwable exception; /** * Creates a new instance with the provided {@code namespaceName}. * * @param exception Exception that caused this error. * @param namespaceName Event Hub namespace of the error context. * @throws IllegalArgumentException when 'namespaceName' is {@code null} or empty. */ /** * Gets the namespace for this error. * * @return The namespace for this error. */ public String namespaceName() { return namespaceName; } /** * Gets the exception wrapped in this context. * * @return The exception that caused the error. */ public Throwable exception() { return exception; } /** * {@inheritDoc} */ @Override public String toString() { return String.format(Locale.US, "NS: %s. Exception: %s", namespaceName, exception); } }
class ErrorContext implements Serializable { private static final long serialVersionUID = -2819764407122954922L; private final String namespaceName; private final Throwable exception; /** * Creates a new instance with the provided {@code namespaceName}. * * @param exception Exception that caused this error. * @param namespaceName Event Hub namespace of the error context. * @throws NullPointerException when {@code exception} is {@code null}. * @throws IllegalArgumentException when {@code namespaceName} is {@code null} or empty. */ /** * Gets the namespace for this error. * * @return The namespace for this error. */ public String namespaceName() { return namespaceName; } /** * Gets the exception wrapped in this context. * * @return The exception that caused the error. */ public Throwable exception() { return exception; } /** * {@inheritDoc} */ @Override public String toString() { return String.format(Locale.US, "NS: %s. Exception: %s", namespaceName, exception); } }
Should this be an AmqpException?
public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(timeout); if (connection != null) { connection.close(); } } catch (IOException exception) { throw new AzureException("Unable to close connection to service", exception); } } }
throw new AzureException("Unable to close connection to service", exception);
public void close() { if (hasConnection.getAndSet(false)) { try { final AmqpConnection connection = connectionMono.block(timeout); if (connection != null) { connection.close(); } } catch (IOException exception) { throw new AmqpException(false, "Unable to close connection to service", exception); } } }
class EventHubClient implements Closeable { private final String connectionId; private final Mono<AmqpConnection> connectionMono; private final String host; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionStringBuilder connectionStringBuilder; private final Duration timeout = Duration.ofSeconds(45); EventHubClient(ConnectionStringBuilder connectionStringBuilder, TokenProvider tokenProvider, ReactorProvider provider, ReactorHandlerProvider handlerProvider, Scheduler scheduler) { Objects.requireNonNull(connectionStringBuilder, "'connectionStringBuilder' is null"); Objects.requireNonNull(connectionStringBuilder.endpoint(), "'connectionStringBuilder.endpoint()' is null."); this.connectionStringBuilder = connectionStringBuilder; this.host = connectionStringBuilder.endpoint().getHost(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> ReactorConnection.create(connectionId, host, tokenProvider, provider, handlerProvider, scheduler)) .doOnSubscribe(c -> hasConnection.set(true)) .cache(); } /** * Creates a builder that can configure options for the {@link EventHubClient} before creating an instance of it. * * @return A new {@link EventHubClientBuilder} to createReactor an EventHubClient from. */ public static EventHubClientBuilder builder() { return new EventHubClientBuilder(); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ public Mono<EventHubProperties> getHubProperties() { return connectionMono.flatMap(connection -> { final String audience = String.format(Locale.US, "amqp: return connection.getCBSNode().flatMap(node -> node.authorize(audience, Duration.ofMinutes(5))); }).then(Mono.fromCallable(() -> { return new EventHubProperties("Some path", Instant.now().minus(Period.ofDays(1)), new String[]{"0", "1"}, Instant.now()); })); } /** * Retrieves the set of identifiers for the partitions of an Event Hub. * * @return The set of identifiers for the partitions of an Event Hub. */ public Mono<String[]> getPartitionIds() { return getHubProperties().map(EventHubProperties::partitionIds); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return Mono.empty(); } /** * Creates a sender that transmits events to Event Hub. Event data is automatically routed to an available * partition. * * @return A new {@link EventSender}. */ public EventSender createSender() { return new EventSender(); } /** * Creates a sender that can push events to an Event Hub. If * {@link SenderOptions * specific partition. Otherwise, events are automatically routed to an available partition. * * @param options The set of options to apply when creating the sender. * @return A new {@link EventSender}. */ public EventSender createSender(SenderOptions options) { return new EventSender(options); } /** * Creates a receiver that listens to the Event Hub {@code partitionId} starting from the moment it was created. The * consumer group used is the {@link ReceiverOptions * * @param partitionId The identifier of the Event Hub partition. 
* @return An new {@link EventReceiver} that receives events from the partition at the given position. */ public EventReceiver createReceiver(String partitionId) { return new EventReceiver(); } /** * Creates a receiver that listens to the Event Hub {@code partitionId} at the given {@link EventPosition} with the * provided options. * * @param partitionId The identifier of the Event Hub partition. * @param options Additional options for the receiver. * @return An new {@link EventReceiver} that receives events from the partition at the given position. */ public EventReceiver createReceiver(String partitionId, ReceiverOptions options) { return new EventReceiver(); } /** * Closes and disposes of connection to service. Any {@link EventReceiver EventReceivers} and * {@link EventSender EventSenders} created with this instance will have their connections closed. */ @Override }
class EventHubClient implements Closeable { private final String connectionId; private final Mono<AmqpConnection> connectionMono; private final String host; private final AtomicBoolean hasConnection = new AtomicBoolean(false); private final ConnectionStringBuilder connectionStringBuilder; private final Duration timeout = Duration.ofSeconds(45); private final String eventHubName; EventHubClient(ConnectionStringBuilder connectionStringBuilder, TokenProvider tokenProvider, ReactorProvider provider, ReactorHandlerProvider handlerProvider, Scheduler scheduler) { Objects.requireNonNull(connectionStringBuilder, "'connectionStringBuilder' is null"); Objects.requireNonNull(connectionStringBuilder.endpoint(), "'connectionStringBuilder.endpoint()' is null."); this.connectionStringBuilder = connectionStringBuilder; this.eventHubName = connectionStringBuilder.eventHubName(); this.host = connectionStringBuilder.endpoint().getHost(); this.connectionId = StringUtil.getRandomString("MF"); this.connectionMono = Mono.fromCallable(() -> ReactorConnection.create(connectionId, host, tokenProvider, provider, handlerProvider, scheduler)) .doOnSubscribe(c -> hasConnection.set(true)) .cache(); } /** * Creates a builder that can configure options for the {@link EventHubClient} before creating an instance of it. * * @return A new {@link EventHubClientBuilder} to create an EventHubClient from. */ public static EventHubClientBuilder builder() { return new EventHubClientBuilder(); } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. 
*/ public Mono<EventHubProperties> getProperties() { return connectionMono.flatMap(connection -> { final String audience = String.format(Locale.US, "amqp: return connection.getCBSNode().flatMap(node -> node.authorize(audience, Duration.ofMinutes(5))); }).then(Mono.fromCallable(() -> { return new EventHubProperties("Some path", Instant.now().minus(Period.ofDays(1)), new String[]{"0", "1"}, Instant.now()); })); } /** * Retrieves the set of identifiers for the partitions of an Event Hub. * * @return The set of identifiers for the partitions of an Event Hub. */ public Mono<String[]> getPartitionIds() { return getProperties().map(EventHubProperties::partitionIds); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return Mono.empty(); } /** * Creates a sender that transmits events to Event Hub. Event data is automatically routed to an available * partition. * * @return A new {@link EventSender}. */ public EventSender createSender() { return new EventSender(); } /** * Creates a sender that can push events to an Event Hub. If * {@link SenderOptions * specific partition. Otherwise, events are automatically routed to an available partition. * * @param options The set of options to apply when creating the sender. * @return A new {@link EventSender}. */ public EventSender createSender(SenderOptions options) { return new EventSender(options); } /** * Creates a receiver that listens to the Event Hub {@code partitionId} starting from the moment it was created. The * consumer group used is the {@link ReceiverOptions * * @param partitionId The identifier of the Event Hub partition. 
* @return An new {@link EventReceiver} that receives events from the partition at the given position. */ public EventReceiver createReceiver(String partitionId) { return new EventReceiver(); } /** * Creates a receiver that listens to the Event Hub {@code partitionId} at the given {@link EventPosition} with the * provided options. * * @param partitionId The identifier of the Event Hub partition. * @param options Additional options for the receiver. * @return An new {@link EventReceiver} that receives events from the partition at the given position. */ public EventReceiver createReceiver(String partitionId, ReceiverOptions options) { return new EventReceiver(); } /** * Closes and disposes of connection to service. Any {@link EventReceiver EventReceivers} and * {@link EventSender EventSenders} created with this instance will have their connections closed. */ @Override }
If the lastException is null or not an instance of AmqpException, we should return `null`. For example, an IllegalArgumentException is not retriable.
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (lastException == null || !(lastException instanceof AmqpException)) { return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
if (lastException == null || !(lastException instanceof AmqpException)) {
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
class Retry { public static final Retry NO_RETRY = new RetryExponential(Duration.ofSeconds(0), Duration.ofSeconds(0), 0); private AtomicInteger retryCount = new AtomicInteger(0); /** * Check if the existing exception is a retryable exception. * * @param exception A exception that was observed for the operation to be retried. * @return true if the exception is a retryable exception, otherwise false. * @throws IllegalArgumentException when the exception is null. */ public static boolean isRetryableException(Exception exception) { if (exception == null) { throw new IllegalArgumentException("exception cannot be null"); } if (exception instanceof AmqpException) { return ((AmqpException) exception).isTransient(); } return false; } /** * Get 'NO_RETRY' of current. * * @return Retry 'NO_RETRY'. */ public static Retry getNoRetry() { return Retry.NO_RETRY; } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefault() { return new RetryExponential( ClientConstants.DEFAULT_RETRY_MIN_BACKOFF, ClientConstants.DEFAULT_RETRY_MAX_BACKOFF, ClientConstants.DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. 
*/ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration onGetNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
You should also test that the exception is retryable or not, and if they have hit the maximum number of attempts. These are checks that are agnostic of the retry algorithm.
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (lastException == null || !(lastException instanceof AmqpException)) { return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
if (lastException == null || !(lastException instanceof AmqpException)) {
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
class Retry { public static final Retry NO_RETRY = new RetryExponential(Duration.ofSeconds(0), Duration.ofSeconds(0), 0); private AtomicInteger retryCount = new AtomicInteger(0); /** * Check if the existing exception is a retryable exception. * * @param exception A exception that was observed for the operation to be retried. * @return true if the exception is a retryable exception, otherwise false. * @throws IllegalArgumentException when the exception is null. */ public static boolean isRetryableException(Exception exception) { if (exception == null) { throw new IllegalArgumentException("exception cannot be null"); } if (exception instanceof AmqpException) { return ((AmqpException) exception).isTransient(); } return false; } /** * Get 'NO_RETRY' of current. * * @return Retry 'NO_RETRY'. */ public static Retry getNoRetry() { return Retry.NO_RETRY; } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefault() { return new RetryExponential( ClientConstants.DEFAULT_RETRY_MIN_BACKOFF, ClientConstants.DEFAULT_RETRY_MAX_BACKOFF, ClientConstants.DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. 
*/ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration onGetNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
What I mean is that the logic for if the exception is retryable or not and max attempts (including maxRetryCount) should be in Retry.java rather than ExponentialRetry.java because the max retries and is transient applies to _all_ retry policies.
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (lastException == null || !(lastException instanceof AmqpException)) { return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.onGetNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
if (lastException == null || !(lastException instanceof AmqpException)) {
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
class Retry { public static final Retry NO_RETRY = new RetryExponential(Duration.ofSeconds(0), Duration.ofSeconds(0), 0); private AtomicInteger retryCount = new AtomicInteger(0); /** * Check if the existing exception is a retryable exception. * * @param exception A exception that was observed for the operation to be retried. * @return true if the exception is a retryable exception, otherwise false. * @throws IllegalArgumentException when the exception is null. */ public static boolean isRetryableException(Exception exception) { if (exception == null) { throw new IllegalArgumentException("exception cannot be null"); } if (exception instanceof AmqpException) { return ((AmqpException) exception).isTransient(); } return false; } /** * Get 'NO_RETRY' of current. * * @return Retry 'NO_RETRY'. */ public static Retry getNoRetry() { return Retry.NO_RETRY; } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefault() { return new RetryExponential( ClientConstants.DEFAULT_RETRY_MIN_BACKOFF, ClientConstants.DEFAULT_RETRY_MAX_BACKOFF, ClientConstants.DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. 
*/ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration onGetNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
you're missing a verification that this AmqpException is also transient.
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (lastException == null || !(lastException instanceof AmqpException)) { return null; } if (this.retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
if (this.retryCount.get() >= maxRetryCount) {
public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * An abstract representation of a policy to govern retrying of messaging operations. * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry( Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry( DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ public int maxRetryCount() { return this.maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
nit: you don't need to specify `this`. There is no other variable declared in this scope with the same name.
public int maxRetryCount() { return this.maxRetryCount; }
return this.maxRetryCount;
public int maxRetryCount() { return maxRetryCount; }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * An abstract representation of a policy to govern retrying of messaging operations. * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry( Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry( DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (lastException == null || !(lastException instanceof AmqpException)) { return null; } if (this.retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); } /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. 
*/ /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
nit: you don't need to specify `this`. There is no other variable declared in this scope with the same name.
private double computeRetryFactor() { final long deltaBackoff = this.maxBackoff.minus(this.minBackoff).getSeconds(); if (deltaBackoff <= 0 || super.maxRetryCount() <= 0) { return 0; } return Math.log(deltaBackoff) / Math.log(super.maxRetryCount()); }
final long deltaBackoff = this.maxBackoff.minus(this.minBackoff).getSeconds();
private double computeRetryFactor() { final long deltaBackoff = maxBackoff.minus(minBackoff).getSeconds(); if (deltaBackoff <= 0 || super.maxRetryCount() <= 0) { return 0; } return Math.log(deltaBackoff) / Math.log(super.maxRetryCount()); }
class ExponentialRetry extends Retry { public static final Duration TIMER_TOLERANCE = Duration.ofSeconds(1); private final Duration minBackoff; private final Duration maxBackoff; private final double retryFactor; /** * @param minBackoff The minimum time period permissible for backing off between retries. * @param maxBackoff The maximum time period permissible for backing off between retries. * @param maxRetryCount The maximum number of retries allowed. */ public ExponentialRetry(Duration minBackoff, Duration maxBackoff, int maxRetryCount) { super(maxRetryCount); this.minBackoff = minBackoff; this.maxBackoff = maxBackoff; this.retryFactor = computeRetryFactor(); } @Override protected Duration calculateNextRetryInterval(final Exception lastException, final Duration remainingTime, final int baseWaitSeconds, final int retryCount) { if ((!Retry.isRetriableException(lastException)) || retryCount >= super.maxRetryCount()) { return null; } final double nextRetryInterval = Math.pow(retryFactor, (double) retryCount); final long nextRetryIntervalSeconds = (long) nextRetryInterval; final long nextRetryIntervalNano = (long) ((nextRetryInterval - (double) nextRetryIntervalSeconds) * 1000000000); if (remainingTime.getSeconds() < Math.max(nextRetryInterval, TIMER_TOLERANCE.getSeconds())) { return null; } final Duration retryAfter = this.minBackoff.plus(Duration.ofSeconds(nextRetryIntervalSeconds, nextRetryIntervalNano)); return retryAfter.plus(Duration.ofSeconds(baseWaitSeconds)); } }
class ExponentialRetry extends Retry { public static final Duration TIMER_TOLERANCE = Duration.ofSeconds(1); private final Duration minBackoff; private final Duration maxBackoff; private final double retryFactor; /** * Creates a new instance with a minimum and maximum retry period in addition to maximum number of retry attempts. * * @param minBackoff The minimum time period permissible for backing off between retries. * @param maxBackoff The maximum time period permissible for backing off between retries. * @param maxRetryCount The maximum number of retries allowed. */ public ExponentialRetry(Duration minBackoff, Duration maxBackoff, int maxRetryCount) { super(maxRetryCount); this.minBackoff = minBackoff; this.maxBackoff = maxBackoff; this.retryFactor = computeRetryFactor(); } @Override protected Duration calculateNextRetryInterval(final Exception lastException, final Duration remainingTime, final int baseWaitSeconds, final int retryCount) { final double nextRetryInterval = Math.pow(retryFactor, (double) retryCount); final long nextRetryIntervalSeconds = (long) nextRetryInterval; final long nextRetryIntervalNano = (long) ((nextRetryInterval - (double) nextRetryIntervalSeconds) * 1000000000); if (remainingTime.getSeconds() < Math.max(nextRetryInterval, TIMER_TOLERANCE.getSeconds())) { return null; } final Duration retryAfter = minBackoff.plus(Duration.ofSeconds(nextRetryIntervalSeconds, nextRetryIntervalNano)); return retryAfter.plus(Duration.ofSeconds(baseWaitSeconds)); } }
nit: Any reason each parameter is on a new line? Does it reach the 120 characters/line limit?
public static Retry getDefaultRetry() { return new ExponentialRetry( DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); }
DEFAULT_RETRY_MIN_BACKOFF,
public static Retry getDefaultRetry() { return new ExponentialRetry(DEFAULT_RETRY_MIN_BACKOFF, DEFAULT_RETRY_MAX_BACKOFF, DEFAULT_MAX_RETRY_COUNT); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry( Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. */ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. 
* @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
class Retry { public static final Duration DEFAULT_RETRY_MIN_BACKOFF = Duration.ofSeconds(0); public static final Duration DEFAULT_RETRY_MAX_BACKOFF = Duration.ofSeconds(30); public static final int DEFAULT_MAX_RETRY_COUNT = 10; private final AtomicInteger retryCount = new AtomicInteger(); private final int maxRetryCount; /** * Creates a new instance of Retry with the maximum retry count of {@code maxRetryCount} * * @param maxRetryCount The maximum number of retries allowed. */ public Retry(int maxRetryCount) { this.maxRetryCount = maxRetryCount; } /** * Check if the existing exception is a retriable exception. * * @param exception An exception that was observed for the operation to be retried. * @return true if the exception is a retriable exception, otherwise false. */ public static boolean isRetriableException(Exception exception) { return (exception instanceof AmqpException) && ((AmqpException) exception).isTransient(); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ public static Retry getNoRetry() { return new ExponentialRetry(Duration.ZERO, Duration.ZERO, 0); } /** * Get default configured Retry. * * @return Retry which has all default property set up. */ /** * Increase one count to current count value. * * @return current AtomicInteger value. */ public int incrementRetryCount() { return retryCount.incrementAndGet(); } /** * Get the current retried count. * * @return current AtomicInteger value. */ public int getRetryCount() { return retryCount.get(); } /** * Reset AtomicInteger to value zero. */ public void resetRetryInterval() { retryCount.set(0); } /** * Get the maximum allowed retry count. * * @return maximum allowed retry count value. */ public int maxRetryCount() { return maxRetryCount; } /** * Calculates the amount of time to delay before the next retry attempt * * @param lastException The last exception that was observed for the operation to be retried. 
* @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ public Duration getNextRetryInterval(Exception lastException, Duration remainingTime) { int baseWaitTime = 0; if (!isRetriableException(lastException)) { return null; } if (retryCount.get() >= maxRetryCount) { return null; } if (((AmqpException) lastException).getErrorCondition() == ErrorCondition.SERVER_BUSY_ERROR) { baseWaitTime += ClientConstants.SERVER_BUSY_BASE_SLEEP_TIME_IN_SECS; } return this.calculateNextRetryInterval(lastException, remainingTime, baseWaitTime, this.getRetryCount()); } /** * Allows a concrete retry policy implementation to offer a base retry interval to be used in * the calculations performed by 'Retry.GetNextRetryInterval'. * * @param lastException The last exception that was observed for the operation to be retried. * @param remainingTime The amount of time remaining for the cumulative timeout across retry attempts. * @param baseWaitSeconds The number of seconds to base the suggested retry interval on; * this should be used as the minimum interval returned under normal circumstances. * @param retryCount The number of retries that have already been attempted. * @return The amount of time to delay before retrying the associated operation; if {@code null}, * then the operation is no longer eligible to be retried. */ protected abstract Duration calculateNextRetryInterval(Exception lastException, Duration remainingTime, int baseWaitSeconds, int retryCount); }
> return new String[] { resource + "/.default" }; [](start = 7, length = 48) Should this use the DEFAULT_SUFFIX constant above?
public static String[] resourceToScopes(String resource) { Objects.requireNonNull(resource); return new String[] { resource + "/.default" }; }
return new String[] { resource + "/.default" };
public static String[] resourceToScopes(String resource) { Objects.requireNonNull(resource); return new String[] { resource + DEFAULT_SUFFIX }; }
class ScopeUtil { private static final String DEFAULT_SUFFIX = "/.defualt"; /** * Convert a list of scopes to a resource for Azure Active Directory. * @param scopes the list of scopes to authenticate to * @return the resource to authenticate with Azure Active Directory. * @throws IllegalArgumentException if scopes is empty or has more than 1 items */ public static String scopesToResource(String[] scopes) { Objects.requireNonNull(scopes); if (scopes.length != 1) { throw new IllegalArgumentException("To convert to a resource string the specified array must be exactly length 1"); } if (!scopes[0].endsWith(DEFAULT_SUFFIX)) { return scopes[0]; } return scopes[0].substring(0, scopes[0].lastIndexOf(DEFAULT_SUFFIX)); } /** * Convert a resource to a list of scopes. * @param resource the resource for Azure Active Directory * @return the list of scopes */ private ScopeUtil() { } }
class ScopeUtil { private static final String DEFAULT_SUFFIX = "/.default"; /** * Convert a list of scopes to a resource for Azure Active Directory. * @param scopes the list of scopes to authenticate to * @return the resource to authenticate with Azure Active Directory. * @throws IllegalArgumentException if scopes is empty or has more than 1 items */ public static String scopesToResource(String[] scopes) { Objects.requireNonNull(scopes); if (scopes.length != 1) { throw new IllegalArgumentException("To convert to a resource string the specified array must be exactly length 1"); } if (!scopes[0].endsWith(DEFAULT_SUFFIX)) { return scopes[0]; } return scopes[0].substring(0, scopes[0].lastIndexOf(DEFAULT_SUFFIX)); } /** * Convert a resource to a list of scopes. * @param resource the resource for Azure Active Directory * @return the list of scopes */ private ScopeUtil() { } }
This should be a CheckStyle warning. No left curly brace for a new line.
public String toString() { if (StringUtil.isNullOrWhiteSpace(this.connectionString)) { StringBuilder connectionStringBuilder = new StringBuilder(); if (this.endpoint != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENDPOINT_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.endpoint.toString(), KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.entityPath)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENTITY_PATH_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.entityPath, KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessKeyName)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", SHARED_ACCESS_KEY_NAME_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.sharedAccessKeyName, KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessKey)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s", SHARED_ACCESS_KEY_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.sharedAccessKey)); } if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessSingatureToken)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s", sharedAccessSignatureTokenKeyName, KEY_VALUE_SEPARATOR, this.sharedAccessSingatureToken)); } if (this.operationTimeout != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", KEY_VALUE_PAIR_DELIMITER, OPERATION_TIMEOUT_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.operationTimeout.toString())); } if (this.retryPolicy != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", KEY_VALUE_PAIR_DELIMITER, RETRY_POLICY_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.retryPolicy.toString())); } if (this.transportType != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", KEY_VALUE_PAIR_DELIMITER, TRANSPORT_TYPE_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.transportType.toString())); } if (this.authentication != null) { connectionStringBuilder.append(String.format(Locale.US,"%s%s%s%s", 
KEY_VALUE_PAIR_DELIMITER, AUTHENTICATION_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.authentication.toString())); } this.connectionString = connectionStringBuilder.toString(); } return this.connectionString; }
{
public String toString() { if (StringUtil.isNullOrWhiteSpace(this.connectionString)) { StringBuilder connectionStringBuilder = new StringBuilder(); if (this.endpoint != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENDPOINT_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.endpoint.toString(), KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.entityPath)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENTITY_PATH_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.entityPath, KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessKeyName)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", SHARED_ACCESS_KEY_NAME_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.sharedAccessKeyName, KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessKey)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s", SHARED_ACCESS_KEY_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.sharedAccessKey)); } if (!StringUtil.isNullOrWhiteSpace(this.sharedAccessSingatureToken)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s", sharedAccessSignatureTokenKeyName, KEY_VALUE_SEPARATOR, this.sharedAccessSingatureToken)); } if (this.operationTimeout != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", KEY_VALUE_PAIR_DELIMITER, OPERATION_TIMEOUT_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.operationTimeout.toString())); } if (this.retryPolicy != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", KEY_VALUE_PAIR_DELIMITER, RETRY_POLICY_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.retryPolicy.toString())); } if (this.transportType != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", KEY_VALUE_PAIR_DELIMITER, TRANSPORT_TYPE_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.transportType.toString())); } if (this.authentication != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", 
KEY_VALUE_PAIR_DELIMITER, AUTHENTICATION_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.authentication)); } this.connectionString = connectionStringBuilder.toString(); } return this.connectionString; }
class ConnectionStringBuilder { private static final String END_POINT_RAW_FORMAT = "amqps: private static final String HOSTNAME_CONFIG_NAME = "Hostname"; private static final String ENDPOINT_CONFIG_NAME = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME_CONFIG_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY_CONFIG_NAME = "SharedAccessKey"; private static final String ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME = "SharedAccessSignature"; private static final String SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME = "SharedAccessSignatureToken"; private static final String TRANSPORT_TYPE_CONFIG_NAME = "TransportType"; private static final String ENTITY_PATH_CONFIG_NAME = "EntityPath"; private static final String OPERATION_TIMEOUT_CONFIG_NAME = "OperationTimeout"; private static final String RETRY_POLICY_CONFIG_NAME = "RetryPolicy"; private static final String AUTHENTICATION_CONFIG_NAME = "Authentication"; private static final String KEY_VALUE_SEPARATOR = "="; private static final String KEY_VALUE_PAIR_DELIMITER = ";"; private static final String ALL_KEY_ENUMERATE_REGEX = "(" + String.join("|", HOSTNAME_CONFIG_NAME, ENDPOINT_CONFIG_NAME, SHARED_ACCESS_KEY_NAME_CONFIG_NAME, SHARED_ACCESS_KEY_CONFIG_NAME, SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, ENTITY_PATH_CONFIG_NAME, OPERATION_TIMEOUT_CONFIG_NAME, RETRY_POLICY_CONFIG_NAME, ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, TRANSPORT_TYPE_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME, ")" ); private static final String KEYS_WITH_DELIMITERS_REGEX = KEY_VALUE_PAIR_DELIMITER + ALL_KEY_ENUMERATE_REGEX + KEY_VALUE_SEPARATOR; private String connectionString; private URI endpoint; private String authentication; private String sharedAccessKeyName; private String sharedAccessKey; private String sharedAccessSingatureToken; private String sharedAccessSignatureTokenKeyName; private String entityPath; private Duration operationTimeout; private RetryPolicy retryPolicy; private TransportType 
transportType; /** * Default operation timeout if timeout is not specified in the connection string. 30 seconds. */ public static final Duration DefaultOperationTimeout = Duration.ofSeconds(ClientConstants.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS); /** * Connection string value used for the Authentication field which indicates that * Managed Identity TokenProvider will be used for authentication purposes. */ public static final String MANAGED_IDENTITY_AUTHENTICATION = "Managed Identity"; private ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final Duration operationTimeout, final RetryPolicy retryPolicy) { this.endpoint = endpointAddress; this.operationTimeout = operationTimeout; this.retryPolicy = retryPolicy; this.entityPath = entityPath; } private ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey, final Duration operationTimeout, final RetryPolicy retryPolicy) { this(endpointAddress, entityPath, operationTimeout, retryPolicy); this.sharedAccessKey = sharedAccessKey; this.sharedAccessKeyName = sharedAccessKeyName; } private ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessSingatureToken, final Duration operationTimeout, final RetryPolicy retryPolicy) { this(endpointAddress, entityPath, operationTimeout, retryPolicy); this.sharedAccessSingatureToken = sharedAccessSingatureToken; } private ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey, final Duration operationTimeout, final RetryPolicy retryPolicy) { this(Util.convertNamespaceToEndPointURI(namespaceName), entityPath, sharedAccessKeyName, sharedAccessKey, operationTimeout, retryPolicy); } private ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessSingatureToken, final Duration operationTimeout, final 
RetryPolicy retryPolicy) { this(Util.convertNamespaceToEndPointURI(namespaceName), entityPath, sharedAccessSingatureToken, operationTimeout, retryPolicy); } /** * Creates a new instance from namespace, entity path and SAS Key name and value. * @param namespaceName Namespace name (dns suffix - ex: .servicebus.windows.net is not required) * @param entityPath Entity path. For queue or topic, use name. For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessKeyName Shared Access Key name * @param sharedAccessKey Shared Access Key */ public ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey) { this(namespaceName, entityPath, sharedAccessKeyName, sharedAccessKey, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from namespace, entity path and already generated SAS token. * @param namespaceName Namespace name (dns suffix - ex: .servicebus.windows.net is not required) * @param entityPath Entity path. For queue or topic, use name. For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessSingature Shared Access Signature already generated */ public ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessSingature) { this(namespaceName, entityPath, sharedAccessSingature, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from endpoint address of the namesapce, entity path and SAS Key name and value * @param endpointAddress namespace level endpoint. This needs to be in the format of scheme: * @param entityPath Entity path. For queue or topic, use name. 
For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessKeyName Shared Access Key name * @param sharedAccessKey Shared Access Key */ public ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey) { this(endpointAddress, entityPath, sharedAccessKeyName, sharedAccessKey, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from endpoint address of the namesapce, entity path and already generated SAS token. * @param endpointAddress namespace level endpoint. This needs to be in the format of scheme: * @param entityPath Entity path. For queue or topic, use name. For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessSingature Shared Access Signature already generated */ public ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessSingature) { this(endpointAddress, entityPath, sharedAccessSingature, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from the given connection string. * ConnectionString format: * Endpoint=sb: * or Endpoint=sb: * @param connectionString ServiceBus ConnectionString * @throws IllegalConnectionStringFormatException when the format of the ConnectionString is not valid */ public ConnectionStringBuilder(String connectionString) { this.parseConnectionString(connectionString); } /** * Creates a new instance from the given connection string and entity path. A connection string may or may not include the entity path. * ConnectionString format: * Endpoint=sb: * or Endpoint=sb: * @param namespaceConnectionString connections string of the ServiceBus namespace. This doesn't include the entity path. 
* @param entityPath path to the entity within the namespace */ public ConnectionStringBuilder(String namespaceConnectionString, String entityPath) { this(namespaceConnectionString); this.entityPath = entityPath; } /** * Get the endpoint which can be used to connect to the ServiceBus Namespace * @return Endpoint representing the service bus namespace */ public URI getEndpoint() { return this.endpoint; } /** * Get the shared access policy key value from the connection string or null. * @return Shared Access Signature key value */ public String getSasKey() { return this.sharedAccessKey; } /** * Get the shared access policy owner name from the connection string or null. * @return Shared Access Signature key name */ public String getSasKeyName() { return this.sharedAccessKeyName; } /** * Returns the shared access signature token from the connection string or null. * @return Shared Access Signature Token */ public String getSharedAccessSignatureToken() { return this.sharedAccessSingatureToken; } /** * Get the entity path value from the connection string * @return Entity Path */ public String getEntityPath() { return this.entityPath; } /** * Gets the duration after which a pending operation like Send or RECEIVE will time out. If a timeout is not specified, it defaults to {@link * This value will be used by all operations which uses this {@link ConnectionStringBuilder}, unless explicitly over-ridden. * @return operationTimeout */ public Duration getOperationTimeout() { return (this.operationTimeout == null ? ConnectionStringBuilder.DefaultOperationTimeout : this.operationTimeout); } /** * Set the OperationTimeout value in the Connection String. This value will be used by all operations which uses this {@link ConnectionStringBuilder}, unless explicitly over-ridden. * <p>ConnectionString with operationTimeout is not inter-operable between java and clients in other platforms. 
* @param operationTimeout Operation Timeout */ public void setOperationTimeout(final Duration operationTimeout) { this.operationTimeout = operationTimeout; } /** * Get the retry policy instance that was created as part of this builder's creation. * @return RetryPolicy applied for any operation performed using this ConnectionString */ public RetryPolicy getRetryPolicy() { return (this.retryPolicy == null ? RetryPolicy.getDefault() : this.retryPolicy); } /** * Set the retry policy. * <p>RetryPolicy is not Serialized as part of {@link ConnectionStringBuilder * @param retryPolicy RetryPolicy applied for any operation performed using this ConnectionString */ public void setRetryPolicy(final RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; } /** * TransportType on which all the communication for the Service Bus created using this ConnectionString. * Default value is {@link TransportType * * @return transportType */ public TransportType getTransportType() { return (this.transportType == null ? TransportType.AMQP : transportType); } /** * Set the TransportType value in the Connection String. If no TransportType is set, this defaults to {@link TransportType * * @param transportType Transport Type * @return the {@link ConnectionStringBuilder} instance being set. */ public ConnectionStringBuilder setTransportType(final TransportType transportType) { this.transportType = transportType; return this; } /** * @return Returns the authentication method. 
*/ public String getAuthentication() { return this.authentication; } /** * Returns an inter-operable connection string that can be used to connect to ServiceBus Namespace * @return connection string */ @Override private void parseConnectionString(String connectionString) { if (StringUtil.isNullOrWhiteSpace(connectionString)) { throw new IllegalConnectionStringFormatException(String.format("connectionString cannot be empty")); } String connection = KEY_VALUE_PAIR_DELIMITER + connectionString; Pattern keyValuePattern = Pattern.compile(KEYS_WITH_DELIMITERS_REGEX, Pattern.CASE_INSENSITIVE); String[] values = keyValuePattern.split(connection); Matcher keys = keyValuePattern.matcher(connection); if (values == null || values.length <= 1 || keys.groupCount() == 0) { throw new IllegalConnectionStringFormatException("Connection String cannot be parsed."); } if (!StringUtil.isNullOrWhiteSpace((values[0]))) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Cannot parse part of ConnectionString: %s", values[0])); } int valueIndex = 0; while (keys.find()) { valueIndex++; String key = keys.group(); key = key.substring(1, key.length() - 1); if (values.length < valueIndex + 1) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Value for the connection string parameter name: %s, not found", key)); } if (key.equalsIgnoreCase(ENDPOINT_CONFIG_NAME)) { if (this.endpoint != null) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Multiple %s and/or %s detected. 
Make sure only one is defined", ENDPOINT_CONFIG_NAME, HOSTNAME_CONFIG_NAME)); } try { this.endpoint = new URI(values[valueIndex]); } catch (URISyntaxException exception) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "%s should be in format scheme: exception); } } else if (key.equalsIgnoreCase(HOSTNAME_CONFIG_NAME)) { if (this.endpoint != null) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Multiple %s and/or %s detected. Make sure only one is defined", ENDPOINT_CONFIG_NAME, HOSTNAME_CONFIG_NAME)); } try { this.endpoint = new URI(String.format(Locale.US, END_POINT_RAW_FORMAT, values[valueIndex])); } catch (URISyntaxException exception) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "%s should be a fully quantified host name address", HOSTNAME_CONFIG_NAME), exception); } } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", SHARED_ACCESS_KEY_NAME_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessKeyName = values[valueIndex]; } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", SHARED_ACCESS_KEY_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessKey = values[valueIndex]; } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessSingatureToken = values[valueIndex]; 
this.sharedAccessSignatureTokenKeyName = SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME; } else if (key.equalsIgnoreCase(ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessSingatureToken = values[valueIndex]; this.sharedAccessSignatureTokenKeyName = ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME; } else if (key.equalsIgnoreCase(ENTITY_PATH_CONFIG_NAME)) { this.entityPath = values[valueIndex]; } else if (key.equalsIgnoreCase(OPERATION_TIMEOUT_CONFIG_NAME)) { try { this.operationTimeout = Duration.parse(values[valueIndex]); } catch (DateTimeParseException exception) { throw new IllegalConnectionStringFormatException("Invalid value specified for property 'Duration' in the ConnectionString.", exception); } } else if (key.equalsIgnoreCase(RETRY_POLICY_CONFIG_NAME)) { this.retryPolicy = values[valueIndex].equals(ClientConstants.DEFAULT_RETRY) ? RetryPolicy.getDefault() : (values[valueIndex].equals(ClientConstants.NO_RETRY) ? 
RetryPolicy.getNoRetry() : null); if (this.retryPolicy == null) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Connection string parameter '%s'='%s' is not recognized", RETRY_POLICY_CONFIG_NAME, values[valueIndex])); } } else if (key.equalsIgnoreCase(TRANSPORT_TYPE_CONFIG_NAME)) { try { this.transportType = TransportType.fromString(values[valueIndex]); } catch (IllegalArgumentException exception) { throw new IllegalConnectionStringFormatException( String.format("Invalid value specified for property '%s' in the ConnectionString.", TRANSPORT_TYPE_CONFIG_NAME), exception); } } else if (key.equalsIgnoreCase(AUTHENTICATION_CONFIG_NAME)) { if (this.sharedAccessKeyName != null || this.sharedAccessKey != null || this.sharedAccessSingatureToken != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and Shared Access Token at the same time", AUTHENTICATION_CONFIG_NAME)); } this.authentication = values[valueIndex]; } else { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Illegal connection string parameter name: %s", key)); } } } public String toLoggableString() { StringBuilder connectionStringBuilder = new StringBuilder(); if (this.endpoint != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENDPOINT_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.endpoint.toString(), KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.entityPath)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENTITY_PATH_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.entityPath, KEY_VALUE_PAIR_DELIMITER)); } return connectionStringBuilder.toString(); } }
class ConnectionStringBuilder { private static final String END_POINT_RAW_FORMAT = "amqps: private static final String HOSTNAME_CONFIG_NAME = "Hostname"; private static final String ENDPOINT_CONFIG_NAME = "Endpoint"; private static final String SHARED_ACCESS_KEY_NAME_CONFIG_NAME = "SharedAccessKeyName"; private static final String SHARED_ACCESS_KEY_CONFIG_NAME = "SharedAccessKey"; private static final String ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME = "SharedAccessSignature"; private static final String SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME = "SharedAccessSignatureToken"; private static final String TRANSPORT_TYPE_CONFIG_NAME = "TransportType"; private static final String ENTITY_PATH_CONFIG_NAME = "EntityPath"; private static final String OPERATION_TIMEOUT_CONFIG_NAME = "OperationTimeout"; private static final String RETRY_POLICY_CONFIG_NAME = "RetryPolicy"; private static final String AUTHENTICATION_CONFIG_NAME = "Authentication"; private static final String KEY_VALUE_SEPARATOR = "="; private static final String KEY_VALUE_PAIR_DELIMITER = ";"; private static final String ALL_KEY_ENUMERATE_REGEX = "(" + String.join("|", HOSTNAME_CONFIG_NAME, ENDPOINT_CONFIG_NAME, SHARED_ACCESS_KEY_NAME_CONFIG_NAME, SHARED_ACCESS_KEY_CONFIG_NAME, SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, ENTITY_PATH_CONFIG_NAME, OPERATION_TIMEOUT_CONFIG_NAME, RETRY_POLICY_CONFIG_NAME, ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, TRANSPORT_TYPE_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME, ")" ); private static final String KEYS_WITH_DELIMITERS_REGEX = KEY_VALUE_PAIR_DELIMITER + ALL_KEY_ENUMERATE_REGEX + KEY_VALUE_SEPARATOR; private String connectionString; private URI endpoint; private String authentication; private String sharedAccessKeyName; private String sharedAccessKey; private String sharedAccessSingatureToken; private String sharedAccessSignatureTokenKeyName; private String entityPath; private Duration operationTimeout; private RetryPolicy retryPolicy; private TransportType 
transportType; /** * Default operation timeout if timeout is not specified in the connection string. 30 seconds. */ public static final Duration DefaultOperationTimeout = Duration.ofSeconds(ClientConstants.DEFAULT_OPERATION_TIMEOUT_IN_SECONDS); /** * Connection string value used for the Authentication field which indicates that * Managed Identity TokenProvider will be used for authentication purposes. */ public static final String MANAGED_IDENTITY_AUTHENTICATION = "Managed Identity"; private ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final Duration operationTimeout, final RetryPolicy retryPolicy) { this.endpoint = endpointAddress; this.operationTimeout = operationTimeout; this.retryPolicy = retryPolicy; this.entityPath = entityPath; } private ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey, final Duration operationTimeout, final RetryPolicy retryPolicy) { this(endpointAddress, entityPath, operationTimeout, retryPolicy); this.sharedAccessKey = sharedAccessKey; this.sharedAccessKeyName = sharedAccessKeyName; } private ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessSingatureToken, final Duration operationTimeout, final RetryPolicy retryPolicy) { this(endpointAddress, entityPath, operationTimeout, retryPolicy); this.sharedAccessSingatureToken = sharedAccessSingatureToken; } private ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey, final Duration operationTimeout, final RetryPolicy retryPolicy) { this(Util.convertNamespaceToEndPointURI(namespaceName), entityPath, sharedAccessKeyName, sharedAccessKey, operationTimeout, retryPolicy); } private ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessSingatureToken, final Duration operationTimeout, final 
RetryPolicy retryPolicy) { this(Util.convertNamespaceToEndPointURI(namespaceName), entityPath, sharedAccessSingatureToken, operationTimeout, retryPolicy); } /** * Creates a new instance from namespace, entity path and SAS Key name and value. * @param namespaceName Namespace name (dns suffix - ex: .servicebus.windows.net is not required) * @param entityPath Entity path. For queue or topic, use name. For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessKeyName Shared Access Key name * @param sharedAccessKey Shared Access Key */ public ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey) { this(namespaceName, entityPath, sharedAccessKeyName, sharedAccessKey, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from namespace, entity path and already generated SAS token. * @param namespaceName Namespace name (dns suffix - ex: .servicebus.windows.net is not required) * @param entityPath Entity path. For queue or topic, use name. For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessSingature Shared Access Signature already generated */ public ConnectionStringBuilder( final String namespaceName, final String entityPath, final String sharedAccessSingature) { this(namespaceName, entityPath, sharedAccessSingature, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from endpoint address of the namesapce, entity path and SAS Key name and value * @param endpointAddress namespace level endpoint. This needs to be in the format of scheme: * @param entityPath Entity path. For queue or topic, use name. 
For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessKeyName Shared Access Key name * @param sharedAccessKey Shared Access Key */ public ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessKeyName, final String sharedAccessKey) { this(endpointAddress, entityPath, sharedAccessKeyName, sharedAccessKey, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from endpoint address of the namesapce, entity path and already generated SAS token. * @param endpointAddress namespace level endpoint. This needs to be in the format of scheme: * @param entityPath Entity path. For queue or topic, use name. For subscription use &lt;topicName&gt;/subscriptions/&lt;subscriptionName&gt; * @param sharedAccessSingature Shared Access Signature already generated */ public ConnectionStringBuilder( final URI endpointAddress, final String entityPath, final String sharedAccessSingature) { this(endpointAddress, entityPath, sharedAccessSingature, ConnectionStringBuilder.DefaultOperationTimeout, RetryPolicy.getDefault()); } /** * Creates a new instance from the given connection string. * ConnectionString format: * Endpoint=sb: * or Endpoint=sb: * @param connectionString ServiceBus ConnectionString * @throws IllegalConnectionStringFormatException when the format of the ConnectionString is not valid */ public ConnectionStringBuilder(String connectionString) { this.parseConnectionString(connectionString); } /** * Creates a new instance from the given connection string and entity path. A connection string may or may not include the entity path. * ConnectionString format: * Endpoint=sb: * or Endpoint=sb: * @param namespaceConnectionString connections string of the ServiceBus namespace. This doesn't include the entity path. 
* @param entityPath path to the entity within the namespace */ public ConnectionStringBuilder(String namespaceConnectionString, String entityPath) { this(namespaceConnectionString); this.entityPath = entityPath; } /** * Get the endpoint which can be used to connect to the ServiceBus Namespace * @return Endpoint representing the service bus namespace */ public URI getEndpoint() { return this.endpoint; } /** * Get the shared access policy key value from the connection string or null. * @return Shared Access Signature key value */ public String getSasKey() { return this.sharedAccessKey; } /** * Get the shared access policy owner name from the connection string or null. * @return Shared Access Signature key name */ public String getSasKeyName() { return this.sharedAccessKeyName; } /** * Returns the shared access signature token from the connection string or null. * @return Shared Access Signature Token */ public String getSharedAccessSignatureToken() { return this.sharedAccessSingatureToken; } /** * Get the entity path value from the connection string * @return Entity Path */ public String getEntityPath() { return this.entityPath; } /** * Gets the duration after which a pending operation like Send or RECEIVE will time out. If a timeout is not specified, it defaults to {@link * This value will be used by all operations which uses this {@link ConnectionStringBuilder}, unless explicitly over-ridden. * @return operationTimeout */ public Duration getOperationTimeout() { return (this.operationTimeout == null ? ConnectionStringBuilder.DefaultOperationTimeout : this.operationTimeout); } /** * Set the OperationTimeout value in the Connection String. This value will be used by all operations which uses this {@link ConnectionStringBuilder}, unless explicitly over-ridden. * <p>ConnectionString with operationTimeout is not inter-operable between java and clients in other platforms. 
* @param operationTimeout Operation Timeout */ public void setOperationTimeout(final Duration operationTimeout) { this.operationTimeout = operationTimeout; } /** * Get the retry policy instance that was created as part of this builder's creation. * @return RetryPolicy applied for any operation performed using this ConnectionString */ public RetryPolicy getRetryPolicy() { return (this.retryPolicy == null ? RetryPolicy.getDefault() : this.retryPolicy); } /** * Set the retry policy. * <p>RetryPolicy is not Serialized as part of {@link ConnectionStringBuilder * @param retryPolicy RetryPolicy applied for any operation performed using this ConnectionString */ public void setRetryPolicy(final RetryPolicy retryPolicy) { this.retryPolicy = retryPolicy; } /** * TransportType on which all the communication for the Service Bus created using this ConnectionString. * Default value is {@link TransportType * * @return transportType */ public TransportType getTransportType() { return (this.transportType == null ? TransportType.AMQP : transportType); } /** * Set the TransportType value in the Connection String. If no TransportType is set, this defaults to {@link TransportType * * @param transportType Transport Type * @return the {@link ConnectionStringBuilder} instance being set. */ public ConnectionStringBuilder setTransportType(final TransportType transportType) { this.transportType = transportType; return this; } /** * @return Returns the authentication method. 
*/ public String getAuthentication() { return this.authentication; } /** * Returns an inter-operable connection string that can be used to connect to ServiceBus Namespace * @return connection string */ @Override private void parseConnectionString(String connectionString) { if (StringUtil.isNullOrWhiteSpace(connectionString)) { throw new IllegalConnectionStringFormatException(String.format("connectionString cannot be empty")); } String connection = KEY_VALUE_PAIR_DELIMITER + connectionString; Pattern keyValuePattern = Pattern.compile(KEYS_WITH_DELIMITERS_REGEX, Pattern.CASE_INSENSITIVE); String[] values = keyValuePattern.split(connection); Matcher keys = keyValuePattern.matcher(connection); if (values == null || values.length <= 1 || keys.groupCount() == 0) { throw new IllegalConnectionStringFormatException("Connection String cannot be parsed."); } if (!StringUtil.isNullOrWhiteSpace((values[0]))) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Cannot parse part of ConnectionString: %s", values[0])); } int valueIndex = 0; while (keys.find()) { valueIndex++; String key = keys.group(); key = key.substring(1, key.length() - 1); if (values.length < valueIndex + 1) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Value for the connection string parameter name: %s, not found", key)); } if (key.equalsIgnoreCase(ENDPOINT_CONFIG_NAME)) { if (this.endpoint != null) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Multiple %s and/or %s detected. 
Make sure only one is defined", ENDPOINT_CONFIG_NAME, HOSTNAME_CONFIG_NAME)); } try { this.endpoint = new URI(values[valueIndex]); } catch (URISyntaxException exception) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "%s should be in format scheme: exception); } } else if (key.equalsIgnoreCase(HOSTNAME_CONFIG_NAME)) { if (this.endpoint != null) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Multiple %s and/or %s detected. Make sure only one is defined", ENDPOINT_CONFIG_NAME, HOSTNAME_CONFIG_NAME)); } try { this.endpoint = new URI(String.format(Locale.US, END_POINT_RAW_FORMAT, values[valueIndex])); } catch (URISyntaxException exception) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "%s should be a fully quantified host name address", HOSTNAME_CONFIG_NAME), exception); } } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_NAME_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", SHARED_ACCESS_KEY_NAME_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessKeyName = values[valueIndex]; } else if (key.equalsIgnoreCase(SHARED_ACCESS_KEY_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", SHARED_ACCESS_KEY_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessKey = values[valueIndex]; } else if (key.equalsIgnoreCase(SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessSingatureToken = values[valueIndex]; 
this.sharedAccessSignatureTokenKeyName = SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME; } else if (key.equalsIgnoreCase(ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME)) { if (this.authentication != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and '%s' at the same time", ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME, AUTHENTICATION_CONFIG_NAME)); } this.sharedAccessSingatureToken = values[valueIndex]; this.sharedAccessSignatureTokenKeyName = ALTERNATE_SHARED_ACCESS_SIGNATURE_TOKEN_CONFIG_NAME; } else if (key.equalsIgnoreCase(ENTITY_PATH_CONFIG_NAME)) { this.entityPath = values[valueIndex]; } else if (key.equalsIgnoreCase(OPERATION_TIMEOUT_CONFIG_NAME)) { try { this.operationTimeout = Duration.parse(values[valueIndex]); } catch (DateTimeParseException exception) { throw new IllegalConnectionStringFormatException("Invalid value specified for property 'Duration' in the ConnectionString.", exception); } } else if (key.equalsIgnoreCase(RETRY_POLICY_CONFIG_NAME)) { this.retryPolicy = values[valueIndex].equals(ClientConstants.DEFAULT_RETRY) ? RetryPolicy.getDefault() : (values[valueIndex].equals(ClientConstants.NO_RETRY) ? 
RetryPolicy.getNoRetry() : null); if (this.retryPolicy == null) { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Connection string parameter '%s'='%s' is not recognized", RETRY_POLICY_CONFIG_NAME, values[valueIndex])); } } else if (key.equalsIgnoreCase(TRANSPORT_TYPE_CONFIG_NAME)) { try { this.transportType = TransportType.fromString(values[valueIndex]); } catch (IllegalArgumentException exception) { throw new IllegalConnectionStringFormatException( String.format("Invalid value specified for property '%s' in the ConnectionString.", TRANSPORT_TYPE_CONFIG_NAME), exception); } } else if (key.equalsIgnoreCase(AUTHENTICATION_CONFIG_NAME)) { if (this.sharedAccessKeyName != null || this.sharedAccessKey != null || this.sharedAccessSingatureToken != null) { throw new IllegalConnectionStringFormatException( String.format("Cannot have values specified for properties '%s' and Shared Access Token at the same time", AUTHENTICATION_CONFIG_NAME)); } this.authentication = values[valueIndex]; } else { throw new IllegalConnectionStringFormatException( String.format(Locale.US, "Illegal connection string parameter name: %s", key)); } } } public String toLoggableString() { StringBuilder connectionStringBuilder = new StringBuilder(); if (this.endpoint != null) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENDPOINT_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.endpoint.toString(), KEY_VALUE_PAIR_DELIMITER)); } if (!StringUtil.isNullOrWhiteSpace(this.entityPath)) { connectionStringBuilder.append(String.format(Locale.US, "%s%s%s%s", ENTITY_PATH_CONFIG_NAME, KEY_VALUE_SEPARATOR, this.entityPath, KEY_VALUE_PAIR_DELIMITER)); } return connectionStringBuilder.toString(); } }
I have tested what happens when an empty string is supplied for the date, and it did not pass. When we exercised the API, the input and output looked like this: headers as input ``` headers: { Content-Length:0 If-Match: null ... } ``` The string we built: ``` 0 null ``` The literal string "null" is not supposed to appear in the string-to-sign — this is what I observed. Can we double-check the SharedKey coverage in the tests we built?
/**
 * Builds the canonicalized string-to-sign for the Storage SharedKey scheme from the
 * request URL, HTTP method, and request headers.
 *
 * @param requestURL URL of the request.
 * @param httpMethod HTTP method being used.
 * @param headers Headers on the request.
 * @return The canonicalized string that will be HMAC-SHA256 signed.
 */
private String buildStringToSign(URL requestURL, String httpMethod, Map<String, String> headers) {
    // A Content-Length of "0" must contribute an empty line to the signature.
    String contentLength = getStandardHeaderValue(headers, "Content-Length");
    contentLength = contentLength.equals("0") ? "" : contentLength;

    // When 'x-ms-date' is supplied, the Date header must not contribute to the signature.
    String dateHeader = headers.containsKey("x-ms-date")
        ? ""
        : getStandardHeaderValue(headers, "Date");

    return String.join("\n",
        httpMethod,
        getStandardHeaderValue(headers, "Content-Encoding"),
        getStandardHeaderValue(headers, "Content-Language"),
        contentLength,
        getStandardHeaderValue(headers, "Content-MD5"),
        getStandardHeaderValue(headers, "Content-Type"),
        dateHeader,
        getStandardHeaderValue(headers, "If-Modified-Since"),
        getStandardHeaderValue(headers, "If-Match"),
        getStandardHeaderValue(headers, "If-None-Match"),
        getStandardHeaderValue(headers, "If-Unmodified-Since"),
        getStandardHeaderValue(headers, "Range"),
        getAdditionalXmsHeaders(headers),
        getCanonicalizedResource(requestURL));
}

/**
 * Returns the header's value, mapping both a missing header and an explicit {@code null}
 * value to the empty string. This prevents the literal text "null" from leaking into the
 * string-to-sign and prevents a NullPointerException when Content-Length is absent.
 *
 * @param headers Headers on the request.
 * @param headerName Name of the header to look up.
 * @return The header's value, or an empty string when absent or null.
 */
private String getStandardHeaderValue(Map<String, String> headers, String headerName) {
    final String headerValue = headers.get(headerName);
    return headerValue == null ? "" : headerValue;
}
String dateHeader = (headers.containsKey("x-ms-date")) ? "" : headers.getOrDefault("Date", "");
/**
 * Builds the canonicalized string-to-sign for the Storage SharedKey scheme from the
 * request URL, HTTP method, and request headers.
 *
 * @param requestURL URL of the request.
 * @param httpMethod HTTP method being used.
 * @param headers Headers on the request.
 * @return The canonicalized string that will be HMAC-SHA256 signed.
 */
private String buildStringToSign(URL requestURL, String httpMethod, Map<String, String> headers) {
    // A Content-Length of "0" must contribute an empty line to the signature.
    String contentLength = getStandardHeaderValue(headers, "Content-Length");
    contentLength = contentLength.equals("0") ? "" : contentLength;

    // When 'x-ms-date' is supplied, the Date header must not contribute to the signature.
    String dateHeader = headers.containsKey("x-ms-date")
        ? ""
        : getStandardHeaderValue(headers, "Date");

    return String.join("\n",
        httpMethod,
        getStandardHeaderValue(headers, "Content-Encoding"),
        getStandardHeaderValue(headers, "Content-Language"),
        contentLength,
        getStandardHeaderValue(headers, "Content-MD5"),
        getStandardHeaderValue(headers, "Content-Type"),
        dateHeader,
        getStandardHeaderValue(headers, "If-Modified-Since"),
        getStandardHeaderValue(headers, "If-Match"),
        getStandardHeaderValue(headers, "If-None-Match"),
        getStandardHeaderValue(headers, "If-Unmodified-Since"),
        getStandardHeaderValue(headers, "Range"),
        getAdditionalXmsHeaders(headers),
        getCanonicalizedResource(requestURL));
}

/**
 * Returns the header's value, mapping both a missing header and an explicit {@code null}
 * value to the empty string. This prevents the literal text "null" from leaking into the
 * string-to-sign and prevents a NullPointerException when Content-Length is absent.
 *
 * @param headers Headers on the request.
 * @param headerName Name of the header to look up.
 * @return The header's value, or an empty string when absent or null.
 */
private String getStandardHeaderValue(Map<String, String> headers, String headerName) {
    final String headerValue = headers.get(headerName);
    return headerValue == null ? "" : headerValue;
}
/**
 * Credential that authorizes Azure Storage requests with the account's shared key.
 * An authorization value is produced by HMAC-SHA256 signing a canonicalized form of the
 * request (HTTP method, standard headers, sorted x-ms-* headers, and resource).
 */
class SharedKeyCredential {
    // Format of the Authorization header value: "SharedKey {accountName}:{signature}".
    private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKey %s:%s";
    // Connection string keys are matched case-insensitively, hence the lower-cased constants.
    private static final String ACCOUNT_NAME = "AccountName".toLowerCase();
    private static final String ACCOUNT_KEY = "AccountKey".toLowerCase();

    // Storage account name; used in the canonicalized resource and the Authorization header.
    private final String accountName;
    // Decoded key bytes; used as the HMAC-SHA256 secret.
    private final byte[] accountKey;

    /**
     * Initializes a new instance of SharedKeyCredentials contains an account's name and its primary or secondary
     * accountKey.
     *
     * @param accountName The account name associated with the request.
     * @param accountKey The account access key used to authenticate the request; expected to be Base64-encoded.
     */
    public SharedKeyCredential(String accountName, String accountKey) {
        this.accountName = accountName;
        this.accountKey = Base64.getDecoder().decode(accountKey);
    }

    /**
     * Creates a SharedKey credential from the passed connection string.
     *
     * @param connectionString Connection string used to build the SharedKey credential.
     * @return a SharedKey credential if the connection string contains AccountName and AccountKey
     * @throws IllegalArgumentException If {@code connectionString} doesn't have AccountName or AccountKey.
     */
    public static SharedKeyCredential fromConnectionString(String connectionString) {
        HashMap<String, String> connectionStringPieces = new HashMap<>();
        for (String connectionStringPiece : connectionString.split(";")) {
            // Split on the first '=' only; account keys may themselves contain '='.
            String[] kvp = connectionStringPiece.split("=", 2);
            connectionStringPieces.put(kvp[0].toLowerCase(), kvp[1]);
        }

        String accountName = connectionStringPieces.get(ACCOUNT_NAME);
        String accountKey = connectionStringPieces.get(ACCOUNT_KEY);

        if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) {
            throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'.");
        }

        return new SharedKeyCredential(accountName, accountKey);
    }

    /**
     * Generates the SharedKey Authorization value from information in the request.
     *
     * @param requestURL URL of the request
     * @param httpMethod HTTP method being used
     * @param headers Headers on the request
     * @return the SharedKey authorization value
     */
    public String generateAuthorizationHeader(URL requestURL, String httpMethod, Map<String, String> headers) {
        // Signs the canonicalized string-to-sign built elsewhere in this class.
        return computeHMACSHA256(buildStringToSign(requestURL, httpMethod, headers));
    }

    /**
     * Builds the canonicalized-headers portion of the string-to-sign: every header whose
     * name starts with "x-ms-" and whose value is non-null, sorted by name, formatted as
     * "name:value" with one header per line.
     *
     * @param headers Headers on the request.
     * @return The canonicalized x-ms-* headers, or an empty string when there are none.
     */
    private String getAdditionalXmsHeaders(Map<String, String> headers) {
        final List<String> xmsHeaderNameArray = headers.entrySet().stream()
            .filter(entry -> entry.getKey().toLowerCase(Locale.ROOT).startsWith("x-ms-"))
            .filter(entry -> entry.getValue() != null)
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        if (xmsHeaderNameArray.isEmpty()) {
            return "";
        }

        Collections.sort(xmsHeaderNameArray);

        final StringBuilder canonicalizedHeaders = new StringBuilder();
        for (final String key : xmsHeaderNameArray) {
            if (canonicalizedHeaders.length() > 0) {
                canonicalizedHeaders.append('\n');
            }

            canonicalizedHeaders.append(key)
                .append(':')
                .append(headers.get(key));
        }

        return canonicalizedHeaders.toString();
    }

    /**
     * Builds the canonicalized-resource portion of the string-to-sign:
     * "/{accountName}{path}" followed by one line per query parameter, sorted by parameter
     * name, formatted as "name:value1,value2,..." with values sorted.
     *
     * @param requestURL URL of the request.
     * @return The canonicalized resource string.
     */
    private String getCanonicalizedResource(URL requestURL) {
        final StringBuilder canonicalizedResource = new StringBuilder("/");
        canonicalizedResource.append(accountName);

        if (requestURL.getPath().length() > 0) {
            canonicalizedResource.append(requestURL.getPath());
        } else {
            canonicalizedResource.append('/');
        }

        if (requestURL.getQuery() == null) {
            return canonicalizedResource.toString();
        }

        QueryStringDecoder queryDecoder = new QueryStringDecoder("?" + requestURL.getQuery());
        Map<String, List<String>> queryParams = queryDecoder.parameters();

        ArrayList<String> queryParamNames = new ArrayList<>(queryParams.keySet());
        Collections.sort(queryParamNames);

        for (String queryParamName : queryParamNames) {
            final List<String> queryParamValues = queryParams.get(queryParamName);
            Collections.sort(queryParamValues);
            String queryParamValuesStr = String.join(",", queryParamValues);
            canonicalizedResource.append("\n")
                .append(queryParamName.toLowerCase(Locale.ROOT))
                .append(":")
                .append(queryParamValuesStr);
        }

        return canonicalizedResource.toString();
    }

    /**
     * Computes the HMAC-SHA256 of the string-to-sign with the account key and formats it
     * into the "SharedKey {accountName}:{signature}" Authorization value.
     *
     * @param stringToSign The canonicalized string to sign.
     * @return The Authorization header value.
     */
    private String computeHMACSHA256(String stringToSign) {
        try {
            Mac hmacSha256 = Mac.getInstance("HmacSHA256");
            hmacSha256.init(new SecretKeySpec(accountKey, "HmacSHA256"));
            byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
            String signature = Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes));
            return String.format(AUTHORIZATION_HEADER_FORMAT, accountName, signature);
        } catch (NoSuchAlgorithmException | InvalidKeyException ex) {
            // NOTE(review): HmacSHA256 is required on all JVMs, so this is effectively
            // unreachable; consider a RuntimeException instead of Error — confirm with team.
            throw new Error(ex);
        }
    }
}
/**
 * Credential that authorizes Azure Storage requests with the account's shared key.
 * An authorization value is produced by HMAC-SHA256 signing a canonicalized form of the
 * request (HTTP method, standard headers, sorted x-ms-* headers, and resource).
 */
class SharedKeyCredential {
    // Format of the Authorization header value: "SharedKey {accountName}:{signature}".
    private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKey %s:%s";
    // Connection string keys are matched case-insensitively, hence the lower-cased constants.
    private static final String ACCOUNT_NAME = "AccountName".toLowerCase();
    private static final String ACCOUNT_KEY = "AccountKey".toLowerCase();

    // Storage account name; used in the canonicalized resource and the Authorization header.
    private final String accountName;
    // Decoded key bytes; used as the HMAC-SHA256 secret.
    private final byte[] accountKey;

    /**
     * Initializes a new instance of SharedKeyCredentials contains an account's name and its primary or secondary
     * accountKey.
     *
     * @param accountName The account name associated with the request.
     * @param accountKey The account access key used to authenticate the request; expected to be Base64-encoded.
     */
    public SharedKeyCredential(String accountName, String accountKey) {
        this.accountName = accountName;
        this.accountKey = Base64.getDecoder().decode(accountKey);
    }

    /**
     * Creates a SharedKey credential from the passed connection string.
     *
     * @param connectionString Connection string used to build the SharedKey credential.
     * @return a SharedKey credential if the connection string contains AccountName and AccountKey
     * @throws IllegalArgumentException If {@code connectionString} doesn't have AccountName or AccountKey.
     */
    public static SharedKeyCredential fromConnectionString(String connectionString) {
        HashMap<String, String> connectionStringPieces = new HashMap<>();
        for (String connectionStringPiece : connectionString.split(";")) {
            // Split on the first '=' only; account keys may themselves contain '='.
            String[] kvp = connectionStringPiece.split("=", 2);
            connectionStringPieces.put(kvp[0].toLowerCase(), kvp[1]);
        }

        String accountName = connectionStringPieces.get(ACCOUNT_NAME);
        String accountKey = connectionStringPieces.get(ACCOUNT_KEY);

        if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) {
            throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'.");
        }

        return new SharedKeyCredential(accountName, accountKey);
    }

    /**
     * Generates the SharedKey Authorization value from information in the request.
     *
     * @param requestURL URL of the request
     * @param httpMethod HTTP method being used
     * @param headers Headers on the request
     * @return the SharedKey authorization value
     */
    public String generateAuthorizationHeader(URL requestURL, String httpMethod, Map<String, String> headers) {
        // Signs the canonicalized string-to-sign built elsewhere in this class.
        return computeHMACSHA256(buildStringToSign(requestURL, httpMethod, headers));
    }

    /**
     * Builds the canonicalized-headers portion of the string-to-sign: every header whose
     * name starts with "x-ms-" and whose value is non-null, sorted by name, formatted as
     * "name:value" with one header per line.
     *
     * @param headers Headers on the request.
     * @return The canonicalized x-ms-* headers, or an empty string when there are none.
     */
    private String getAdditionalXmsHeaders(Map<String, String> headers) {
        final List<String> xmsHeaderNameArray = headers.entrySet().stream()
            .filter(entry -> entry.getKey().toLowerCase(Locale.ROOT).startsWith("x-ms-"))
            .filter(entry -> entry.getValue() != null)
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        if (xmsHeaderNameArray.isEmpty()) {
            return "";
        }

        Collections.sort(xmsHeaderNameArray);

        final StringBuilder canonicalizedHeaders = new StringBuilder();
        for (final String key : xmsHeaderNameArray) {
            if (canonicalizedHeaders.length() > 0) {
                canonicalizedHeaders.append('\n');
            }

            canonicalizedHeaders.append(key)
                .append(':')
                .append(headers.get(key));
        }

        return canonicalizedHeaders.toString();
    }

    /**
     * Builds the canonicalized-resource portion of the string-to-sign:
     * "/{accountName}{path}" followed by one line per query parameter, sorted by parameter
     * name, formatted as "name:value1,value2,..." with values sorted.
     *
     * @param requestURL URL of the request.
     * @return The canonicalized resource string.
     */
    private String getCanonicalizedResource(URL requestURL) {
        final StringBuilder canonicalizedResource = new StringBuilder("/");
        canonicalizedResource.append(accountName);

        if (requestURL.getPath().length() > 0) {
            canonicalizedResource.append(requestURL.getPath());
        } else {
            canonicalizedResource.append('/');
        }

        if (requestURL.getQuery() == null) {
            return canonicalizedResource.toString();
        }

        QueryStringDecoder queryDecoder = new QueryStringDecoder("?" + requestURL.getQuery());
        Map<String, List<String>> queryParams = queryDecoder.parameters();

        ArrayList<String> queryParamNames = new ArrayList<>(queryParams.keySet());
        Collections.sort(queryParamNames);

        for (String queryParamName : queryParamNames) {
            final List<String> queryParamValues = queryParams.get(queryParamName);
            Collections.sort(queryParamValues);
            String queryParamValuesStr = String.join(",", queryParamValues);
            canonicalizedResource.append("\n")
                .append(queryParamName.toLowerCase(Locale.ROOT))
                .append(":")
                .append(queryParamValuesStr);
        }

        return canonicalizedResource.toString();
    }

    /**
     * Computes the HMAC-SHA256 of the string-to-sign with the account key and formats it
     * into the "SharedKey {accountName}:{signature}" Authorization value.
     *
     * @param stringToSign The canonicalized string to sign.
     * @return The Authorization header value.
     */
    private String computeHMACSHA256(String stringToSign) {
        try {
            Mac hmacSha256 = Mac.getInstance("HmacSHA256");
            hmacSha256.init(new SecretKeySpec(accountKey, "HmacSHA256"));
            byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8);
            String signature = Base64.getEncoder().encodeToString(hmacSha256.doFinal(utf8Bytes));
            return String.format(AUTHORIZATION_HEADER_FORMAT, accountName, signature);
        } catch (NoSuchAlgorithmException | InvalidKeyException ex) {
            // NOTE(review): HmacSHA256 is required on all JVMs, so this is effectively
            // unreachable; consider a RuntimeException instead of Error — confirm with team.
            throw new Error(ex);
        }
    }
}
Does this get the value for an environment variable by the name `AZURE_EVENT_HUBS_CONNECTION_STRING`? I don't believe any of the other languages support reading from env variables at the moment. Was this feature ported over from Track1?
/**
 * Creates a new {@link EventHubClient} based on the configuration set in this builder.
 * Defaults are applied for any connection parameters that were not provided.
 *
 * @return A new {@link EventHubClient} instance.
 * @throws IllegalArgumentException if no credentials were set through a 'credential'
 *     overload and no connection string is available from the configuration store.
 */
public EventHubClient build() {
    // Fall back to the globally-configured settings when no explicit configuration was set.
    configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration;

    if (credentials == null) {
        // No credentials supplied through the builder; try the connection string from the
        // configuration store, which is backed by the environment variable of the same name.
        final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);

        if (ImplUtils.isNullOrEmpty(connectionString)) {
            // Fixed message: the original concatenation was missing " or " and spaces
            // ("...(String)'EventHubClientBuilder...", "string isnot set") and referred to
            // 'credentials(...)' although the builder methods are named 'credential(...)'.
            throw new IllegalArgumentException("Credentials have not been set using"
                + " 'EventHubClientBuilder.credential(String)' or"
                + " 'EventHubClientBuilder.credential(String, String, TokenCredential)', and the"
                + " connection string is not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING
                + "' environment variable.");
        }

        credential(connectionString);
    }

    if (timeout == null) {
        // Default operation timeout for connections, links, and sessions.
        timeout = Duration.ofSeconds(60);
    }

    final ReactorProvider provider = new ReactorProvider();
    final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);

    if (retry == null) {
        retry = Retry.getDefaultRetry();
    }

    proxyConfiguration = constructDefaultProxyConfiguration(configuration);

    if (scheduler == null) {
        scheduler = Schedulers.elastic();
    }

    // Shared access key credentials authorize through SAS tokens; all other token
    // credentials are treated as JSON web tokens.
    final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential
        ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CBSAuthorizationType.JSON_WEB_TOKEN;

    final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials,
        authorizationType, timeout, transport, retry, proxyConfiguration, scheduler);

    return new EventHubClient(parameters, provider, handlerProvider);
}
final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);
/**
 * Creates a new {@link EventHubClient} based on the configuration set in this builder.
 * Defaults are applied for any connection parameters that were not provided.
 *
 * @return A new {@link EventHubClient} instance.
 * @throws IllegalArgumentException if no credentials were set through a 'credential'
 *     overload and no connection string is available from the configuration store.
 */
public EventHubClient build() {
    // Fall back to the globally-configured settings when no explicit configuration was set.
    configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration;

    if (credentials == null) {
        // No credentials supplied through the builder; try the connection string from the
        // configuration store, which is backed by the environment variable of the same name.
        final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);

        if (ImplUtils.isNullOrEmpty(connectionString)) {
            // Fixed message: the original concatenation was missing " or " and spaces
            // ("...(String)'EventHubClientBuilder...", "string isnot set") and referred to
            // 'credentials(...)' although the builder methods are named 'credential(...)'.
            throw new IllegalArgumentException("Credentials have not been set using"
                + " 'EventHubClientBuilder.credential(String)' or"
                + " 'EventHubClientBuilder.credential(String, String, TokenCredential)', and the"
                + " connection string is not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING
                + "' environment variable.");
        }

        credential(connectionString);
    }

    if (timeout == null) {
        // Default operation timeout for connections, links, and sessions.
        timeout = Duration.ofSeconds(60);
    }

    final ReactorProvider provider = new ReactorProvider();
    final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider);

    if (retry == null) {
        retry = Retry.getDefaultRetry();
    }

    proxyConfiguration = constructDefaultProxyConfiguration(configuration);

    if (scheduler == null) {
        scheduler = Schedulers.elastic();
    }

    // Shared access key credentials authorize through SAS tokens; all other token
    // credentials are treated as JSON web tokens.
    final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential
        ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE
        : CBSAuthorizationType.JSON_WEB_TOKEN;

    final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials,
        authorizationType, timeout, transport, retry, proxyConfiguration, scheduler);

    return new EventHubClient(parameters, provider, handlerProvider);
}
/**
 * Builder for {@link EventHubClient} instances. Credentials may be supplied as a
 * connection string (with or without an Event Hub path) or as a host/path/TokenCredential
 * triple; remaining settings (transport, timeout, retry, proxy, scheduler, configuration)
 * are optional and defaulted at build time.
 */
class EventHubClientBuilder {
    // Configuration key (and environment variable name) for the fallback connection string.
    private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING";

    private TokenCredential credentials;
    private Configuration configuration;
    private Duration timeout;
    private ProxyConfiguration proxyConfiguration;
    private Retry retry;
    private Scheduler scheduler;
    private TransportType transport;
    private String host;
    private String eventHubPath;

    /**
     * Creates a new instance with the default transport {@link TransportType#AMQP}.
     */
    public EventHubClientBuilder() {
        transport = TransportType.AMQP;
    }

    /**
     * Sets the credential information given a connection string to the Event Hub instance.
     *
     * @param connectionString The connection string to the Event Hub this client wishes to connect to. It is expected
     *     that the Event Hub path and the shared key properties are contained in this connection string.
     * @return The updated EventHubClientBuilder object.
     * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code connectionString}
     *     does not contain the "EntityPath" key, which is the name of the Event Hub instance.
     * @throws AzureException If the shared access signature token credential could not be created using the connection
     *     string.
     */
    public EventHubClientBuilder credential(String connectionString) {
        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(),
                properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
        }

        return credential(properties.endpoint().getHost(), properties.eventHubPath(), tokenCredential);
    }

    /**
     * Sets the credential information given a connection string to the Event Hubs namespace and a path to a specific
     * Event Hub instance.
     *
     * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is expected
     *     that the shared key properties are contained in this connection string, but not the Event Hub path.
     * @param eventHubPath The path of the specific Event Hub to connect the client to.
     * @return The updated EventHubClientBuilder object.
     * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubPath} is null or empty. Or, if
     *     the {@code connectionString} contains the Event Hub path.
     * @throws AzureException If the shared access signature token credential could not be created using the connection
     *     string.
     */
    public EventHubClientBuilder credential(String connectionString, String eventHubPath) {
        if (ImplUtils.isNullOrEmpty(eventHubPath)) {
            throw new IllegalArgumentException("'eventHubPath' cannot be null or empty");
        }

        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(),
                properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
        }

        // The path must come from the parameter, not the connection string; reject both.
        if (!ImplUtils.isNullOrEmpty(properties.eventHubPath())) {
            throw new IllegalArgumentException(String.format(Locale.US,
                "'connectionString' contains an Event Hub path [%s]. Please use the"
                    + " credentials(String connectionString) overload. Or supply a 'connectionString' without"
                    + " 'EntityPath' in it.", properties.eventHubPath()));
        }

        return credential(properties.endpoint().getHost(), eventHubPath, tokenCredential);
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param host The fully qualified host name for the Event Hubs namespace. This is likely to be similar to
     *     {@literal {your-namespace}.servicebus.windows.net}.
     * @param eventHubPath The path of the specific Event Hub to connect the client to.
     * @param credential The token credential to use for authorization. Access controls may be specified by the Event
     *     Hubs namespace or the requested Event Hub, depending on Azure configuration.
     * @return The updated EventHubClientBuilder object.
     * @throws IllegalArgumentException if {@code host} or {@code eventHubPath} is null or empty.
     * @throws NullPointerException if {@code credential} is null.
     */
    public EventHubClientBuilder credential(String host, String eventHubPath, TokenCredential credential) {
        if (ImplUtils.isNullOrEmpty(host)) {
            throw new IllegalArgumentException("'host' cannot be null or empty");
        }
        if (ImplUtils.isNullOrEmpty(eventHubPath)) {
            throw new IllegalArgumentException("'eventHubPath' cannot be null or empty.");
        }

        Objects.requireNonNull(credential);

        this.host = host;
        this.credentials = credential;
        this.eventHubPath = eventHubPath;
        return this;
    }

    /**
     * Sets the transport type by which all the communication with Azure Event Hubs occurs.
     * Default value is {@link TransportType#AMQP}.
     *
     * @param transport The transport type to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder transportType(TransportType transport) {
        this.transport = transport;
        return this;
    }

    /**
     * Sets the timeout for each connection, link, and session.
     *
     * @param timeout Duration for timeout.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder timeout(Duration timeout) {
        this.timeout = timeout;
        return this;
    }

    /**
     * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is
     * specified, an elastic pool is used.
     *
     * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event
     *     Hubs.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder scheduler(Scheduler scheduler) {
        this.scheduler = scheduler;
        return this;
    }

    /**
     * Sets the proxy configuration for EventHubClient.
     *
     * @param proxyConfiguration The proxy configuration to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) {
        this.proxyConfiguration = proxyConfiguration;
        return this;
    }

    /**
     * Sets the retry policy for EventHubClient.
     *
     * @param retry The retry policy to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder retry(Retry retry) {
        this.retry = retry;
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     * If not set, a clone of the global store from {@link ConfigurationManager} is used at build time.
     *
     * @param configuration The configuration store to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Creates a new {@link EventHubClient} based on the configuration set in this builder.
     * Use the default not null values if the Connection parameters are not provided.
     *
     * @return A new {@link EventHubClient} instance.
     * @throws IllegalArgumentException if the credentials have not been set using either
     *     {@link #credential(String)} or {@link #credential(String, String, TokenCredential)}.
     */
    /**
     * Builds the effective proxy configuration: keeps the authentication type of any
     * user-supplied proxy configuration and resolves the proxy address and credentials from
     * the configuration store (HTTP_PROXY, PROXY_USERNAME, PROXY_PASSWORD).
     *
     * @param configuration Configuration store to read proxy settings from.
     * @return The resolved ProxyConfiguration.
     * @throws IllegalArgumentException if HTTP_PROXY is set but not in "host:port" form.
     */
    private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) {
        ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
        if (proxyConfiguration != null) {
            authentication = proxyConfiguration.authentication();
        }

        String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
        Proxy proxy = null;
        if (proxyAddress != null) {
            final String[] hostPort = proxyAddress.split(":");
            if (hostPort.length < 2) {
                throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
            }
            final String host = hostPort[0];
            final int port = Integer.parseInt(hostPort[1]);
            proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        }

        final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
        final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

        return new ProxyConfiguration(authentication, proxy, username, password);
    }
}
/**
 * Builder for {@link EventHubClient} instances. Credentials may be supplied as a
 * connection string (with or without an Event Hub path) or as a host/path/TokenCredential
 * triple; remaining settings (transport, timeout, retry, proxy, scheduler, configuration)
 * are optional and defaulted at build time.
 */
class EventHubClientBuilder {
    // Configuration key (and environment variable name) for the fallback connection string.
    private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING";

    private TokenCredential credentials;
    private Configuration configuration;
    private Duration timeout;
    private ProxyConfiguration proxyConfiguration;
    private Retry retry;
    private Scheduler scheduler;
    private TransportType transport;
    private String host;
    private String eventHubPath;

    /**
     * Creates a new instance with the default transport {@link TransportType#AMQP}.
     */
    public EventHubClientBuilder() {
        transport = TransportType.AMQP;
    }

    /**
     * Sets the credential information given a connection string to the Event Hub instance.
     *
     * @param connectionString The connection string to the Event Hub this client wishes to connect to. It is expected
     *     that the Event Hub path and the shared key properties are contained in this connection string.
     * @return The updated EventHubClientBuilder object.
     * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code connectionString}
     *     does not contain the "EntityPath" key, which is the name of the Event Hub instance.
     * @throws AzureException If the shared access signature token credential could not be created using the connection
     *     string.
     */
    public EventHubClientBuilder credential(String connectionString) {
        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(),
                properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
        }

        return credential(properties.endpoint().getHost(), properties.eventHubPath(), tokenCredential);
    }

    /**
     * Sets the credential information given a connection string to the Event Hubs namespace and a path to a specific
     * Event Hub instance.
     *
     * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is expected
     *     that the shared key properties are contained in this connection string, but not the Event Hub path.
     * @param eventHubPath The path of the specific Event Hub to connect the client to.
     * @return The updated EventHubClientBuilder object.
     * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubPath} is null or empty. Or, if
     *     the {@code connectionString} contains the Event Hub path.
     * @throws AzureException If the shared access signature token credential could not be created using the connection
     *     string.
     */
    public EventHubClientBuilder credential(String connectionString, String eventHubPath) {
        if (ImplUtils.isNullOrEmpty(eventHubPath)) {
            throw new IllegalArgumentException("'eventHubPath' cannot be null or empty");
        }

        final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString);
        final TokenCredential tokenCredential;
        try {
            tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(),
                properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY);
        } catch (InvalidKeyException | NoSuchAlgorithmException e) {
            throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e);
        }

        // The path must come from the parameter, not the connection string; reject both.
        if (!ImplUtils.isNullOrEmpty(properties.eventHubPath())) {
            throw new IllegalArgumentException(String.format(Locale.US,
                "'connectionString' contains an Event Hub path [%s]. Please use the"
                    + " credentials(String connectionString) overload. Or supply a 'connectionString' without"
                    + " 'EntityPath' in it.", properties.eventHubPath()));
        }

        return credential(properties.endpoint().getHost(), eventHubPath, tokenCredential);
    }

    /**
     * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it.
     *
     * @param host The fully qualified host name for the Event Hubs namespace. This is likely to be similar to
     *     {@literal {your-namespace}.servicebus.windows.net}.
     * @param eventHubPath The path of the specific Event Hub to connect the client to.
     * @param credential The token credential to use for authorization. Access controls may be specified by the Event
     *     Hubs namespace or the requested Event Hub, depending on Azure configuration.
     * @return The updated EventHubClientBuilder object.
     * @throws IllegalArgumentException if {@code host} or {@code eventHubPath} is null or empty.
     * @throws NullPointerException if {@code credential} is null.
     */
    public EventHubClientBuilder credential(String host, String eventHubPath, TokenCredential credential) {
        if (ImplUtils.isNullOrEmpty(host)) {
            throw new IllegalArgumentException("'host' cannot be null or empty");
        }
        if (ImplUtils.isNullOrEmpty(eventHubPath)) {
            throw new IllegalArgumentException("'eventHubPath' cannot be null or empty.");
        }

        Objects.requireNonNull(credential);

        this.host = host;
        this.credentials = credential;
        this.eventHubPath = eventHubPath;
        return this;
    }

    /**
     * Sets the transport type by which all the communication with Azure Event Hubs occurs.
     * Default value is {@link TransportType#AMQP}.
     *
     * @param transport The transport type to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder transportType(TransportType transport) {
        this.transport = transport;
        return this;
    }

    /**
     * Sets the timeout for each connection, link, and session.
     *
     * @param timeout Duration for timeout.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder timeout(Duration timeout) {
        this.timeout = timeout;
        return this;
    }

    /**
     * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is
     * specified, an elastic pool is used.
     *
     * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event
     *     Hubs.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder scheduler(Scheduler scheduler) {
        this.scheduler = scheduler;
        return this;
    }

    /**
     * Sets the proxy configuration for EventHubClient.
     *
     * @param proxyConfiguration The proxy configuration to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) {
        this.proxyConfiguration = proxyConfiguration;
        return this;
    }

    /**
     * Sets the retry policy for EventHubClient.
     *
     * @param retry The retry policy to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder retry(Retry retry) {
        this.retry = retry;
        return this;
    }

    /**
     * Sets the configuration store that is used during construction of the service client.
     * If not set, a clone of the global store from {@link ConfigurationManager} is used at build time.
     *
     * @param configuration The configuration store to use.
     * @return The updated EventHubClientBuilder object.
     */
    public EventHubClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Creates a new {@link EventHubClient} based on the configuration set in this builder.
     * Use the default not null values if the Connection parameters are not provided.
     *
     * @return A new {@link EventHubClient} instance.
     * @throws IllegalArgumentException if the credentials have not been set using either
     *     {@link #credential(String)} or {@link #credential(String, String, TokenCredential)}.
     */
    /**
     * Builds the effective proxy configuration: keeps the authentication type of any
     * user-supplied proxy configuration and resolves the proxy address and credentials from
     * the configuration store (HTTP_PROXY, PROXY_USERNAME, PROXY_PASSWORD).
     *
     * @param configuration Configuration store to read proxy settings from.
     * @return The resolved ProxyConfiguration.
     * @throws IllegalArgumentException if HTTP_PROXY is set but not in "host:port" form.
     */
    private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) {
        ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE;
        if (proxyConfiguration != null) {
            authentication = proxyConfiguration.authentication();
        }

        String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY);
        Proxy proxy = null;
        if (proxyAddress != null) {
            final String[] hostPort = proxyAddress.split(":");
            if (hostPort.length < 2) {
                throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy");
            }
            final String host = hostPort[0];
            final int port = Integer.parseInt(hostPort[1]);
            proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port));
        }

        final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME);
        final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD);

        return new ProxyConfiguration(authentication, proxy, username, password);
    }
}
No — it was something we added. Is there no mention of reading from env vars? I can remove it.
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Credentials have not been set using 'EventHubClientBuilder.credentials(String)'" + "EventHubClientBuilder.credentials(String, String, TokenCredential). And the connection string is" + "not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' environment variable."); } credential(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE : CBSAuthorizationType.JSON_WEB_TOKEN; final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials, authorizationType, timeout, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(parameters, provider, handlerProvider); }
final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Credentials have not been set using 'EventHubClientBuilder.credentials(String)'" + "EventHubClientBuilder.credentials(String, String, TokenCredential). And the connection string is" + "not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' environment variable."); } credential(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE : CBSAuthorizationType.JSON_WEB_TOKEN; final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials, authorizationType, timeout, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(parameters, provider, handlerProvider); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private TokenCredential credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; private String host; private String eventHubPath; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * @param connectionString The connection string to the Event Hub this client wishes to connect to. It is expected * that the Event Hub path and the shared key properties are contained in this connection string. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code connectionString} * does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } return credential(properties.endpoint().getHost(), properties.eventHubPath(), tokenCredential); } /** * Sets the credential information given a connection string to the Event Hubs namespace and a path to a specific * Event Hub instance. 
* * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is expected * that the shared key properties are contained in this connection string, but not the Event Hub path. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubPath} is null or empty. Or, if the * {@code connectionString} contains the Event Hub path. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty"); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } if (!ImplUtils.isNullOrEmpty(properties.eventHubPath())) { throw new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub path [%s]. Please use the" + " credentials(String connectionString) overload. Or supply a 'connectionString' without" + " 'EntityPath' in it.", properties.eventHubPath())); } return credential(properties.endpoint().getHost(), eventHubPath, tokenCredential); } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param host The fully qualified host name for the Event Hubs namespace. 
This is likely to be similar to * {@literal {your-namespace}.servicebus.windows.net}. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the Event * Hubs namespace or the requested Event Hub, depending on Azure configuration. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code host} or {@code eventHubPath} is null or empty. * @throws NullPointerException if {@code credentials} is null. */ public EventHubClientBuilder credential(String host, String eventHubPath, TokenCredential credential) { if (ImplUtils.isNullOrEmpty(host)) { throw new IllegalArgumentException("'host' cannot be null or empty"); } if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty."); } Objects.requireNonNull(credential); this.host = host; this.credentials = credential; this.eventHubPath = eventHubPath; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. 
* @throws IllegalArgumentException if the credentials have not been set using either {@link * or {@link */ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { final String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } final String host = hostPort[0]; final int port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private TokenCredential credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; private String host; private String eventHubPath; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * @param connectionString The connection string to the Event Hub this client wishes to connect to. It is expected * that the Event Hub path and the shared key properties are contained in this connection string. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code connectionString} * does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } return credential(properties.endpoint().getHost(), properties.eventHubPath(), tokenCredential); } /** * Sets the credential information given a connection string to the Event Hubs namespace and a path to a specific * Event Hub instance. 
* * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is expected * that the shared key properties are contained in this connection string, but not the Event Hub path. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubPath} is null or empty. Or, if the * {@code connectionString} contains the Event Hub path. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty"); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } if (!ImplUtils.isNullOrEmpty(properties.eventHubPath())) { throw new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub path [%s]. Please use the" + " credentials(String connectionString) overload. Or supply a 'connectionString' without" + " 'EntityPath' in it.", properties.eventHubPath())); } return credential(properties.endpoint().getHost(), eventHubPath, tokenCredential); } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param host The fully qualified host name for the Event Hubs namespace. 
This is likely to be similar to * {@literal {your-namespace}.servicebus.windows.net}. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the Event * Hubs namespace or the requested Event Hub, depending on Azure configuration. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code host} or {@code eventHubPath} is null or empty. * @throws NullPointerException if {@code credentials} is null. */ public EventHubClientBuilder credential(String host, String eventHubPath, TokenCredential credential) { if (ImplUtils.isNullOrEmpty(host)) { throw new IllegalArgumentException("'host' cannot be null or empty"); } if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty."); } Objects.requireNonNull(credential); this.host = host; this.credentials = credential; this.eventHubPath = eventHubPath; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. 
* @throws IllegalArgumentException if the credentials have not been set using either {@link * or {@link */ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { final String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } final String host = hostPort[0]; final int port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
You don't **have** to remove it, just don't document it until we have a consensus around it :) We can discuss this on Monday's sync up
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Credentials have not been set using 'EventHubClientBuilder.credentials(String)'" + "EventHubClientBuilder.credentials(String, String, TokenCredential). And the connection string is" + "not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' environment variable."); } credential(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE : CBSAuthorizationType.JSON_WEB_TOKEN; final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials, authorizationType, timeout, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(parameters, provider, handlerProvider); }
final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING);
public EventHubClient build() { configuration = configuration == null ? ConfigurationManager.getConfiguration().clone() : configuration; if (credentials == null) { final String connectionString = configuration.get(AZURE_EVENT_HUBS_CONNECTION_STRING); if (ImplUtils.isNullOrEmpty(connectionString)) { throw new IllegalArgumentException("Credentials have not been set using 'EventHubClientBuilder.credentials(String)'" + "EventHubClientBuilder.credentials(String, String, TokenCredential). And the connection string is" + "not set in the '" + AZURE_EVENT_HUBS_CONNECTION_STRING + "' environment variable."); } credential(connectionString); } if (timeout == null) { timeout = Duration.ofSeconds(60); } final ReactorProvider provider = new ReactorProvider(); final ReactorHandlerProvider handlerProvider = new ReactorHandlerProvider(provider); if (retry == null) { retry = Retry.getDefaultRetry(); } proxyConfiguration = constructDefaultProxyConfiguration(configuration); if (scheduler == null) { scheduler = Schedulers.elastic(); } final CBSAuthorizationType authorizationType = credentials instanceof EventHubSharedAccessKeyCredential ? CBSAuthorizationType.SHARED_ACCESS_SIGNATURE : CBSAuthorizationType.JSON_WEB_TOKEN; final ConnectionOptions parameters = new ConnectionOptions(host, eventHubPath, credentials, authorizationType, timeout, transport, retry, proxyConfiguration, scheduler); return new EventHubClient(parameters, provider, handlerProvider); }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private TokenCredential credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; private String host; private String eventHubPath; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * @param connectionString The connection string to the Event Hub this client wishes to connect to. It is expected * that the Event Hub path and the shared key properties are contained in this connection string. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code connectionString} * does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } return credential(properties.endpoint().getHost(), properties.eventHubPath(), tokenCredential); } /** * Sets the credential information given a connection string to the Event Hubs namespace and a path to a specific * Event Hub instance. 
* * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is expected * that the shared key properties are contained in this connection string, but not the Event Hub path. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubPath} is null or empty. Or, if the * {@code connectionString} contains the Event Hub path. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty"); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } if (!ImplUtils.isNullOrEmpty(properties.eventHubPath())) { throw new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub path [%s]. Please use the" + " credentials(String connectionString) overload. Or supply a 'connectionString' without" + " 'EntityPath' in it.", properties.eventHubPath())); } return credential(properties.endpoint().getHost(), eventHubPath, tokenCredential); } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param host The fully qualified host name for the Event Hubs namespace. 
This is likely to be similar to * {@literal {your-namespace}.servicebus.windows.net}. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the Event * Hubs namespace or the requested Event Hub, depending on Azure configuration. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code host} or {@code eventHubPath} is null or empty. * @throws NullPointerException if {@code credentials} is null. */ public EventHubClientBuilder credential(String host, String eventHubPath, TokenCredential credential) { if (ImplUtils.isNullOrEmpty(host)) { throw new IllegalArgumentException("'host' cannot be null or empty"); } if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty."); } Objects.requireNonNull(credential); this.host = host; this.credentials = credential; this.eventHubPath = eventHubPath; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. 
* @throws IllegalArgumentException if the credentials have not been set using either {@link * or {@link */ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { final String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } final String host = hostPort[0]; final int port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
class EventHubClientBuilder { private static final String AZURE_EVENT_HUBS_CONNECTION_STRING = "AZURE_EVENT_HUBS_CONNECTION_STRING"; private TokenCredential credentials; private Configuration configuration; private Duration timeout; private ProxyConfiguration proxyConfiguration; private Retry retry; private Scheduler scheduler; private TransportType transport; private String host; private String eventHubPath; /** * Creates a new instance with the default transport {@link TransportType */ public EventHubClientBuilder() { transport = TransportType.AMQP; } /** * Sets the credential information given a connection string to the Event Hub instance. * * @param connectionString The connection string to the Event Hub this client wishes to connect to. It is expected * that the Event Hub path and the shared key properties are contained in this connection string. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} is null or empty. Or, the {@code connectionString} * does not contain the "EntityPath" key, which is the name of the Event Hub instance. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString) { final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } return credential(properties.endpoint().getHost(), properties.eventHubPath(), tokenCredential); } /** * Sets the credential information given a connection string to the Event Hubs namespace and a path to a specific * Event Hub instance. 
* * @param connectionString The connection string to use for connecting to the Event Hubs namespace; it is expected * that the shared key properties are contained in this connection string, but not the Event Hub path. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code connectionString} or {@code eventHubPath} is null or empty. Or, if the * {@code connectionString} contains the Event Hub path. * @throws AzureException If the shared access signature token credential could not be created using the connection * string. */ public EventHubClientBuilder credential(String connectionString, String eventHubPath) { if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty"); } final ConnectionStringProperties properties = new ConnectionStringProperties(connectionString); final TokenCredential tokenCredential; try { tokenCredential = new EventHubSharedAccessKeyCredential(properties.sharedAccessKeyName(), properties.sharedAccessKey(), ClientConstants.TOKEN_VALIDITY); } catch (InvalidKeyException | NoSuchAlgorithmException e) { throw new AzureException("Could not create the EventHubSharedAccessKeyCredential.", e); } if (!ImplUtils.isNullOrEmpty(properties.eventHubPath())) { throw new IllegalArgumentException(String.format(Locale.US, "'connectionString' contains an Event Hub path [%s]. Please use the" + " credentials(String connectionString) overload. Or supply a 'connectionString' without" + " 'EntityPath' in it.", properties.eventHubPath())); } return credential(properties.endpoint().getHost(), eventHubPath, tokenCredential); } /** * Sets the credential information for which Event Hub instance to connect to, and how to authorize against it. * * @param host The fully qualified host name for the Event Hubs namespace. 
This is likely to be similar to * {@literal {your-namespace}.servicebus.windows.net}. * @param eventHubPath The path of the specific Event Hub to connect the client to. * @param credential The token credential to use for authorization. Access controls may be specified by the Event * Hubs namespace or the requested Event Hub, depending on Azure configuration. * @return The updated EventHubClientBuilder object. * @throws IllegalArgumentException if {@code host} or {@code eventHubPath} is null or empty. * @throws NullPointerException if {@code credentials} is null. */ public EventHubClientBuilder credential(String host, String eventHubPath, TokenCredential credential) { if (ImplUtils.isNullOrEmpty(host)) { throw new IllegalArgumentException("'host' cannot be null or empty"); } if (ImplUtils.isNullOrEmpty(eventHubPath)) { throw new IllegalArgumentException("'eventHubPath' cannot be null or empty."); } Objects.requireNonNull(credential); this.host = host; this.credentials = credential; this.eventHubPath = eventHubPath; return this; } /** * Sets the transport type by which all the communication with Azure Event Hubs occurs. * Default value is {@link TransportType * * @param transport The transport type to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder transportType(TransportType transport) { this.transport = transport; return this; } /** * Sets the timeout for each connection, link, and session. * * @param timeout Duration for timeout. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder timeout(Duration timeout) { this.timeout = timeout; return this; } /** * Sets the scheduler for operations such as connecting to and receiving or sending data to Event Hubs. If none is * specified, an elastic pool is used. * * @param scheduler The scheduler for operations such as connecting to and receiving or sending data to Event Hubs. * @return The updated EventHubClientBuilder object. 
*/ public EventHubClientBuilder scheduler(Scheduler scheduler) { this.scheduler = scheduler; return this; } /** * Sets the proxy configuration for EventHubClient. * * @param proxyConfiguration The proxy configuration to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder proxyConfiguration(ProxyConfiguration proxyConfiguration) { this.proxyConfiguration = proxyConfiguration; return this; } /** * Sets the retry policy for EventHubClient. * * @param retry The retry policy to use. * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder retry(Retry retry) { this.retry = retry; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated EventHubClientBuilder object. */ public EventHubClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Creates a new {@link EventHubClient} based on the configuration set in this builder. * Use the default not null values if the Connection parameters are not provided. * * @return A new {@link EventHubClient} instance. 
* @throws IllegalArgumentException if the credentials have not been set using either {@link * or {@link */ private ProxyConfiguration constructDefaultProxyConfiguration(Configuration configuration) { ProxyAuthenticationType authentication = ProxyAuthenticationType.NONE; if (proxyConfiguration != null) { authentication = proxyConfiguration.authentication(); } String proxyAddress = configuration.get(BaseConfigurations.HTTP_PROXY); Proxy proxy = null; if (proxyAddress != null) { final String[] hostPort = proxyAddress.split(":"); if (hostPort.length < 2) { throw new IllegalArgumentException("HTTP_PROXY cannot be parsed into a proxy"); } final String host = hostPort[0]; final int port = Integer.parseInt(hostPort[1]); proxy = new Proxy(Proxy.Type.HTTP, new InetSocketAddress(host, port)); } final String username = configuration.get(ProxyConfiguration.PROXY_USERNAME); final String password = configuration.get(ProxyConfiguration.PROXY_PASSWORD); return new ProxyConfiguration(authentication, proxy, username, password); } }
nice. You've got an elegant way to do this, at least.
public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); }
return send(Flux.just(event));
public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); }
class EventSender implements Closeable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final SendOptions DEFAULT_BATCHING_OPTIONS = new SendOptions(); private final ServiceLogger logger = new ServiceLogger(EventSender.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventSenderOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; /** * Creates a new instance of this EventSender with batches that are {@code maxMessageSize} and sends messages to { * * @code partitionId}. */ EventSender(Mono<AmqpSendLink> amqpSendLinkMono, EventSenderOptions options) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Publisher<EventData> events) { Objects.requireNonNull(events); return sendInternal(Flux.from(events), DEFAULT_BATCHING_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Publisher<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(Flux.from(events), options); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); if (!ImplUtils.isNullOrEmpty(partitionKey)) { if (isPartitionSender) { throw new IllegalArgumentException(String.format(Locale.US, "SendOptions.partitionKey() cannot be set when an EventSender is " + "created with EventSenderOptions.partitionId() set. This EventSender can only send events to partition '%s'.", senderOptions.partitionId())); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH)); } } return events.collect(new EventDataCollector(options, 1)) .flatMap(list -> send(Flux.fromIterable(list))); } private Mono<Void> send(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.asError().log("Error sending batch.", error); }); } private Mono<Void> send(EventDataBatch batch) { if (batch.getEvents().isEmpty()) { logger.asInfo().log("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.asInfo().log("Sending with partitionKey[{}], batch size[{}]", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return 
sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } /** * Disposes of the EventSender by closing the underlying connection to the service. * * @throws IOException if the underlying {@link AmqpLink} and its resources could not be disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.timeout()); if (block != null) { block.close(); } } } /* * Collects EventData into EventDataBatch to send to Event Hubs. If maxNumberOfBatches is null then it'll collect as * many batches as possible. Otherwise, if there are more events than can fit into maxNumberOfBatches, then the * collector throws a PayloadSizeExceededException. */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private volatile EventDataBatch currentBatch; EventDataCollector(SendOptions options, Integer maxNumberOfBatches) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes(); this.partitionKey = options.partitionKey(); currentBatch = new EventDataBatch(options.maximumSizeInBytes(), options.partitionKey()); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches)); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventSender implements Closeable { /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private final ServiceLogger logger = new ServiceLogger(EventSender.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventSenderOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; /** * Creates a new instance of this EventSender with batches that are {@code maxMessageSize} and sends messages to { * * @code partitionId}. */ EventSender(Mono<AmqpSendLink> amqpSendLinkMono, EventSenderOptions options) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. 
*/ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Publisher<EventData> events) { Objects.requireNonNull(events); return sendInternal(Flux.from(events), DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Publisher<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(Flux.from(events), options); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); if (!ImplUtils.isNullOrEmpty(partitionKey)) { if (isPartitionSender) { throw new IllegalArgumentException(String.format(Locale.US, "SendOptions.partitionKey() cannot be set when an EventSender is " + "created with EventSenderOptions.partitionId() set. This EventSender can only send events to partition '%s'.", senderOptions.partitionId())); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH)); } } return events.collect(new EventDataCollector(options, 1)) .flatMap(list -> send(Flux.fromIterable(list))); } private Mono<Void> send(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.asError().log("Error sending batch.", error); }); } private Mono<Void> send(EventDataBatch batch) { if (batch.getEvents().isEmpty()) { logger.asInfo().log("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.asInfo().log("Sending with partitionKey[{}], batch size[{}]", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return 
sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } /** * Disposes of the EventSender by closing the underlying connection to the service. * * @throws IOException if the underlying {@link AmqpLink} and its resources could not be disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.timeout()); if (block != null) { block.close(); } } } /* * Collects EventData into EventDataBatch to send to Event Hubs. If maxNumberOfBatches is null then it'll collect as * many batches as possible. Otherwise, if there are more events than can fit into maxNumberOfBatches, then the * collector throws a PayloadSizeExceededException. */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private volatile EventDataBatch currentBatch; EventDataCollector(SendOptions options, Integer maxNumberOfBatches) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes(); this.partitionKey = options.partitionKey(); currentBatch = new EventDataBatch(options.maximumSizeInBytes(), options.partitionKey()); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches)); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
I do not see need for it now and confirmed with Alan as well.
public void log(String format, Object... args) { if (canLogAtLevel(level)) { performLogging(format, args); } }
}
public void log(String format, Object... args) { if (canLogAtLevel(level)) { performLogging(format, args); } }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
I believe this is incorrect - if no configuration is specified, we should use the global configuration, not 'no' configuration (i.e. an empty Map). Please confirm with @alzimmermsft (and maybe ensure that all other client libraries aren't doing this either!)
private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = Configuration.NONE; } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s: } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(retryPolicy); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint.toString(), containerName, blobName)) .pipeline(pipeline); }
}
private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = ConfigurationManager.getConfiguration(); } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline); }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); private static final String ENDPOINT_PROTOCOL = "DefaultEndpointsProtocol".toLowerCase(); private static final String ENDPOINT_SUFFIX = "EndpointSuffix".toLowerCase(); private final List<HttpPipelinePolicy> policies; private URL endpoint; private String containerName; private String blobName; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; public AppendBlobClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. 
*/ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl()); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); this.endpoint = new URL(url.getProtocol() + ": String path = url.getPath(); if (path != null && !path.isEmpty() && !path.equals("/")) { path = path.replaceAll("^/", "").replaceAll("/$", ""); String[] segments = path.split("/", 2); if (segments.length != 2) { throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments"); } else { this.containerName = segments[0]; this.blobName = segments[1]; } } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } return this; } /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. 
* @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SharedKeyCredential credentials) { this.sharedKeyCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(TokenCredential credentials) { this.tokenCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SASTokenCredential credentials) { this.sasTokenCredential = credentials; return this; } /** * Clears the credentials used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredentials() { this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey 
= connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credentials(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private static final String ACCOUNT_KEY = "accountkey"; private static final String ENDPOINT_PROTOCOL = "defaultendpointsprotocol"; private static final String ENDPOINT_SUFFIX = "endpointsuffix"; private final List<HttpPipelinePolicy> policies; private String endpoint; private String containerName; private String blobName; private String snapshot; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RequestRetryOptions retryOptions; private Configuration configuration; public AppendBlobClientBuilder() { retryOptions = new RequestRetryOptions(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. 
*/ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl(), snapshot); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); BlobURLParts parts = URLParser.parse(url); this.endpoint = parts.scheme() + ": if (parts.containerName() != null) { this.containerName = parts.containerName(); } if (parts.blobName() != null) { this.blobName = parts.blobName(); } if (parts.snapshot() != null) { this.snapshot = parts.snapshot(); } } catch (MalformedURLException | UnknownHostException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.credential(credential); } return this; } /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the snapshot of the blob this client is connecting to. 
* @param snapshot the snapshot identifier for the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = credential; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; this.sharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = credential; this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredential() { this.sharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new 
HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credential(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { 
this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * @param retryOptions the options to configure retry behaviors * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } }
This type of URL parsing will be used a lot in the Storage SDKs, should we promote this functionality to a utility class in commons? Potentially move this into Azure Core if we see fit?
public BlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); this.endpoint = new URL(url.getProtocol() + ": String path = url.getPath(); if (path != null && !path.isEmpty() && !path.equals("/")) { path = path.replaceAll("^/", "").replaceAll("/$", ""); String[] segments = path.split("/", 2); if (segments.length != 2) { throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments"); } else { this.containerName = segments[0]; this.blobName = segments[1]; } } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } return this; }
url = new URL(endpoint);
public BlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); BlobURLParts parts = URLParser.parse(url); this.endpoint = parts.scheme() + ": if (parts.containerName() != null) { this.containerName = parts.containerName(); } if (parts.blobName() != null) { this.blobName = parts.blobName(); } if (parts.snapshot() != null) { this.snapshot = parts.snapshot(); } } catch (MalformedURLException | UnknownHostException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.credential(credential); } return this; }
class BlobClientBuilder { private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); private static final String ENDPOINT_PROTOCOL = "DefaultEndpointsProtocol".toLowerCase(); private static final String ENDPOINT_SUFFIX = "EndpointSuffix".toLowerCase(); private final List<HttpPipelinePolicy> policies; private URL endpoint; private String containerName; private String blobName; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; public BlobClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = Configuration.NONE; } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s: } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(retryPolicy); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() 
.url(String.format("%s/%s/%s", endpoint.toString(), containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link BlobClient} created from the configurations in this builder. */ public BlobClient buildClient() { return new BlobClient(buildAsyncClient()); } /** * @return a {@link BlobAsyncClient} created from the configurations in this builder. */ public BlobAsyncClient buildAsyncClient() { return new BlobAsyncClient(buildImpl()); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated BlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated BlobClientBuilder object */ public BlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated BlobClientBuilder object */ public BlobClientBuilder credentials(SharedKeyCredential credentials) { this.sharedKeyCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated BlobClientBuilder object */ public BlobClientBuilder credentials(TokenCredential credentials) { this.tokenCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated BlobClientBuilder object */ public BlobClientBuilder credentials(SASTokenCredential credentials) { 
this.sasTokenCredential = credentials; return this; } /** * Clears the credentials used to authorize requests sent to the service * @return the updated BlobClientBuilder object */ public BlobClientBuilder anonymousCredentials() { this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated BlobClientBuilder object */ public BlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credentials(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated BlobClientBuilder object */ public BlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { 
this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated BlobClientBuilder object */ public BlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class BlobClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private static final String ACCOUNT_KEY = "accountkey"; private static final String ENDPOINT_PROTOCOL = "defaultendpointsprotocol"; private static final String ENDPOINT_SUFFIX = "endpointsuffix"; private final List<HttpPipelinePolicy> policies; private String endpoint; private String containerName; private String blobName; private String snapshot; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RequestRetryOptions retryOptions; private Configuration configuration; public BlobClientBuilder() { retryOptions = new RequestRetryOptions(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = ConfigurationManager.getConfiguration(); } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) 
.build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link BlobClient} created from the configurations in this builder. */ public BlobClient buildClient() { return new BlobClient(buildAsyncClient()); } /** * @return a {@link BlobAsyncClient} created from the configurations in this builder. */ public BlobAsyncClient buildAsyncClient() { return new BlobAsyncClient(buildImpl(), snapshot); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated BlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated BlobClientBuilder object */ public BlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the snapshot of the blob this client is connecting to. 
* @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated BlobClientBuilder object */ public BlobClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = credential; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated BlobClientBuilder object */ public BlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; this.sharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated BlobClientBuilder object */ public BlobClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = credential; this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize requests sent to the service * @return the updated BlobClientBuilder object */ public BlobClientBuilder anonymousCredential() { this.sharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated BlobClientBuilder object */ public BlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp 
= s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credential(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated BlobClientBuilder object */ public BlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated BlobClientBuilder object */ public BlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the 
client. * @param retryOptions the options to configure retry behaviors * @return the updated BlobClientBuilder object */ public BlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } }
In the other SDKs the default is to use the global configuration, it is a fair question to ask if the configuration store should be opt-in (default to Configuration.NONE) or opt-out (default to the global configuration store). Both approaches have their pros and cons. For now we should switch this to align with the other SDKs but have a quick chat about opt-in vs opt-out on configurations.
private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = Configuration.NONE; } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s: } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(retryPolicy); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint.toString(), containerName, blobName)) .pipeline(pipeline); }
}
private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = ConfigurationManager.getConfiguration(); } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline); }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); private static final String ENDPOINT_PROTOCOL = "DefaultEndpointsProtocol".toLowerCase(); private static final String ENDPOINT_SUFFIX = "EndpointSuffix".toLowerCase(); private final List<HttpPipelinePolicy> policies; private URL endpoint; private String containerName; private String blobName; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; public AppendBlobClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. 
*/ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl()); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); this.endpoint = new URL(url.getProtocol() + ": String path = url.getPath(); if (path != null && !path.isEmpty() && !path.equals("/")) { path = path.replaceAll("^/", "").replaceAll("/$", ""); String[] segments = path.split("/", 2); if (segments.length != 2) { throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments"); } else { this.containerName = segments[0]; this.blobName = segments[1]; } } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } return this; } /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. 
* @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SharedKeyCredential credentials) { this.sharedKeyCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(TokenCredential credentials) { this.tokenCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SASTokenCredential credentials) { this.sasTokenCredential = credentials; return this; } /** * Clears the credentials used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredentials() { this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey 
= connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credentials(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private static final String ACCOUNT_KEY = "accountkey"; private static final String ENDPOINT_PROTOCOL = "defaultendpointsprotocol"; private static final String ENDPOINT_SUFFIX = "endpointsuffix"; private final List<HttpPipelinePolicy> policies; private String endpoint; private String containerName; private String blobName; private String snapshot; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RequestRetryOptions retryOptions; private Configuration configuration; public AppendBlobClientBuilder() { retryOptions = new RequestRetryOptions(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. 
*/ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl(), snapshot); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); BlobURLParts parts = URLParser.parse(url); this.endpoint = parts.scheme() + ": if (parts.containerName() != null) { this.containerName = parts.containerName(); } if (parts.blobName() != null) { this.blobName = parts.blobName(); } if (parts.snapshot() != null) { this.snapshot = parts.snapshot(); } } catch (MalformedURLException | UnknownHostException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.credential(credential); } return this; } /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the snapshot of the blob this client is connecting to. 
* @param snapshot the snapshot identifier for the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = credential; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; this.sharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = credential; this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredential() { this.sharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new 
HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credential(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { 
this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * @param retryOptions the options to configure retry behaviors * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } }
This is not always true. If the customer has set up a root container, it's possible to have a blobUrl of the format: myaccount.blob.core.windows.net/blobName
public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); this.endpoint = new URL(url.getProtocol() + ": String path = url.getPath(); if (path != null && !path.isEmpty() && !path.equals("/")) { path = path.replaceAll("^/", "").replaceAll("/$", ""); String[] segments = path.split("/", 2); if (segments.length != 2) { throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments"); } else { this.containerName = segments[0]; this.blobName = segments[1]; } } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } return this; }
throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments");
public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); BlobURLParts parts = URLParser.parse(url); this.endpoint = parts.scheme() + ": if (parts.containerName() != null) { this.containerName = parts.containerName(); } if (parts.blobName() != null) { this.blobName = parts.blobName(); } if (parts.snapshot() != null) { this.snapshot = parts.snapshot(); } } catch (MalformedURLException | UnknownHostException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.credential(credential); } return this; }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); private static final String ENDPOINT_PROTOCOL = "DefaultEndpointsProtocol".toLowerCase(); private static final String ENDPOINT_SUFFIX = "EndpointSuffix".toLowerCase(); private final List<HttpPipelinePolicy> policies; private URL endpoint; private String containerName; private String blobName; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; public AppendBlobClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = Configuration.NONE; } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s: } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(retryPolicy); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() 
.url(String.format("%s/%s/%s", endpoint.toString(), containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. */ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl()); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. 
* @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SharedKeyCredential credentials) { this.sharedKeyCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(TokenCredential credentials) { this.tokenCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SASTokenCredential credentials) { this.sasTokenCredential = credentials; return this; } /** * Clears the credentials used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredentials() { this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey 
= connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credentials(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private static final String ACCOUNT_KEY = "accountkey"; private static final String ENDPOINT_PROTOCOL = "defaultendpointsprotocol"; private static final String ENDPOINT_SUFFIX = "endpointsuffix"; private final List<HttpPipelinePolicy> policies; private String endpoint; private String containerName; private String blobName; private String snapshot; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RequestRetryOptions retryOptions; private Configuration configuration; public AppendBlobClientBuilder() { retryOptions = new RequestRetryOptions(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = ConfigurationManager.getConfiguration(); } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) 
.httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. */ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl(), snapshot); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the snapshot of the blob this client is connecting to. 
* @param snapshot the snapshot identifier for the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = credential; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; this.sharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = credential; this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredential() { this.sharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new 
HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credential(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { 
this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * @param retryOptions the options to configure retry behaviors * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } }
I'm using BlobUrlParts now with UrlParser.
public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); this.endpoint = new URL(url.getProtocol() + ": String path = url.getPath(); if (path != null && !path.isEmpty() && !path.equals("/")) { path = path.replaceAll("^/", "").replaceAll("/$", ""); String[] segments = path.split("/", 2); if (segments.length != 2) { throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments"); } else { this.containerName = segments[0]; this.blobName = segments[1]; } } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } return this; }
throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments");
public AppendBlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); BlobURLParts parts = URLParser.parse(url); this.endpoint = parts.scheme() + ": if (parts.containerName() != null) { this.containerName = parts.containerName(); } if (parts.blobName() != null) { this.blobName = parts.blobName(); } if (parts.snapshot() != null) { this.snapshot = parts.snapshot(); } } catch (MalformedURLException | UnknownHostException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.credential(credential); } return this; }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); private static final String ENDPOINT_PROTOCOL = "DefaultEndpointsProtocol".toLowerCase(); private static final String ENDPOINT_SUFFIX = "EndpointSuffix".toLowerCase(); private final List<HttpPipelinePolicy> policies; private URL endpoint; private String containerName; private String blobName; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; public AppendBlobClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = Configuration.NONE; } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s: } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(retryPolicy); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() 
.url(String.format("%s/%s/%s", endpoint.toString(), containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. */ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl()); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. 
* @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SharedKeyCredential credentials) { this.sharedKeyCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(TokenCredential credentials) { this.tokenCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credentials(SASTokenCredential credentials) { this.sasTokenCredential = credentials; return this; } /** * Clears the credentials used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredentials() { this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey 
= connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credentials(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class AppendBlobClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private static final String ACCOUNT_KEY = "accountkey"; private static final String ENDPOINT_PROTOCOL = "defaultendpointsprotocol"; private static final String ENDPOINT_SUFFIX = "endpointsuffix"; private final List<HttpPipelinePolicy> policies; private String endpoint; private String containerName; private String blobName; private String snapshot; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RequestRetryOptions retryOptions; private Configuration configuration; public AppendBlobClientBuilder() { retryOptions = new RequestRetryOptions(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = ConfigurationManager.getConfiguration(); } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) 
.httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link AppendBlobClient} created from the configurations in this builder. */ public AppendBlobClient buildClient() { return new AppendBlobClient(buildAsyncClient()); } /** * @return a {@link AppendBlobAsyncClient} created from the configurations in this builder. */ public AppendBlobAsyncClient buildAsyncClient() { return new AppendBlobAsyncClient(buildImpl(), snapshot); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated AppendBlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the snapshot of the blob this client is connecting to. 
* @param snapshot the snapshot identifier for the blob * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = credential; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; this.sharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = credential; this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize requests sent to the service * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder anonymousCredential() { this.sharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new 
HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credential(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder configuration(Configuration configuration) { 
this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * @param retryOptions the options to configure retry behaviors * @return the updated AppendBlobClientBuilder object */ public AppendBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } }
It turns out there is already a utility for this: a BlobURLParts can be created via URLParser, so the manual URL parsing here is unnecessary.
public BlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); this.endpoint = new URL(url.getProtocol() + ": String path = url.getPath(); if (path != null && !path.isEmpty() && !path.equals("/")) { path = path.replaceAll("^/", "").replaceAll("/$", ""); String[] segments = path.split("/", 2); if (segments.length != 2) { throw new IllegalArgumentException("Endpoint should contain 0 or at least 2 path segments"); } else { this.containerName = segments[0]; this.blobName = segments[1]; } } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } return this; }
url = new URL(endpoint);
public BlobClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); URL url; try { url = new URL(endpoint); BlobURLParts parts = URLParser.parse(url); this.endpoint = parts.scheme() + ": if (parts.containerName() != null) { this.containerName = parts.containerName(); } if (parts.blobName() != null) { this.blobName = parts.blobName(); } if (parts.snapshot() != null) { this.snapshot = parts.snapshot(); } } catch (MalformedURLException | UnknownHostException ex) { throw new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed."); } SASTokenCredential credential = SASTokenCredential.fromQuery(url.getQuery()); if (credential != null) { this.credential(credential); } return this; }
class BlobClientBuilder { private static final String ACCOUNT_NAME = "AccountName".toLowerCase(); private static final String ACCOUNT_KEY = "AccountKey".toLowerCase(); private static final String ENDPOINT_PROTOCOL = "DefaultEndpointsProtocol".toLowerCase(); private static final String ENDPOINT_SUFFIX = "EndpointSuffix".toLowerCase(); private final List<HttpPipelinePolicy> policies; private URL endpoint; private String containerName; private String blobName; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; public BlobClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = Configuration.NONE; } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s: } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(retryPolicy); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new AzureBlobStorageBuilder() 
.url(String.format("%s/%s/%s", endpoint.toString(), containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link BlobClient} created from the configurations in this builder. */ public BlobClient buildClient() { return new BlobClient(buildAsyncClient()); } /** * @return a {@link BlobAsyncClient} created from the configurations in this builder. */ public BlobAsyncClient buildAsyncClient() { return new BlobAsyncClient(buildImpl()); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated BlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated BlobClientBuilder object */ public BlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated BlobClientBuilder object */ public BlobClientBuilder credentials(SharedKeyCredential credentials) { this.sharedKeyCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated BlobClientBuilder object */ public BlobClientBuilder credentials(TokenCredential credentials) { this.tokenCredential = credentials; return this; } /** * Sets the credentials used to authorize requests sent to the service * @param credentials authorization credentials * @return the updated BlobClientBuilder object */ public BlobClientBuilder credentials(SASTokenCredential credentials) { 
this.sasTokenCredential = credentials; return this; } /** * Clears the credentials used to authorize requests sent to the service * @return the updated BlobClientBuilder object */ public BlobClientBuilder anonymousCredentials() { this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated BlobClientBuilder object */ public BlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp = s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credentials(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated BlobClientBuilder object */ public BlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { 
this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated BlobClientBuilder object */ public BlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class BlobClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private static final String ACCOUNT_KEY = "accountkey"; private static final String ENDPOINT_PROTOCOL = "defaultendpointsprotocol"; private static final String ENDPOINT_SUFFIX = "endpointsuffix"; private final List<HttpPipelinePolicy> policies; private String endpoint; private String containerName; private String blobName; private String snapshot; private SharedKeyCredential sharedKeyCredential; private TokenCredential tokenCredential; private SASTokenCredential sasTokenCredential; private HttpClient httpClient; private HttpLogDetailLevel logLevel; private RequestRetryOptions retryOptions; private Configuration configuration; public BlobClientBuilder() { retryOptions = new RequestRetryOptions(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); } private AzureBlobStorageBuilder buildImpl() { Objects.requireNonNull(endpoint); Objects.requireNonNull(containerName); Objects.requireNonNull(blobName); final List<HttpPipelinePolicy> policies = new ArrayList<>(); if (configuration == null) { configuration = ConfigurationManager.getConfiguration(); } policies.add(new UserAgentPolicy(BlobConfiguration.NAME, BlobConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else if (tokenCredential != null) { policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint))); } else if (sasTokenCredential != null) { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } else { policies.add(new AnonymousCredentialPolicy()); } policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(this.policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) 
.build(); return new AzureBlobStorageBuilder() .url(String.format("%s/%s/%s", endpoint, containerName, blobName)) .pipeline(pipeline); } /** * @return a {@link BlobClient} created from the configurations in this builder. */ public BlobClient buildClient() { return new BlobClient(buildAsyncClient()); } /** * @return a {@link BlobAsyncClient} created from the configurations in this builder. */ public BlobAsyncClient buildAsyncClient() { return new BlobAsyncClient(buildImpl(), snapshot); } /** * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name) * @param endpoint URL of the service * @return the updated BlobClientBuilder object */ /** * Sets the name of the container this client is connecting to. * @param containerName the name of the container * @return the updated BlobClientBuilder object */ public BlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob this client is connecting to. * @param blobName the name of the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder blobName(String blobName) { this.blobName = blobName; return this; } /** * Sets the snapshot of the blob this client is connecting to. 
* @param snapshot the snapshot identifier for the blob * @return the updated BlobClientBuilder object */ public BlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated BlobClientBuilder object */ public BlobClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = credential; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated BlobClientBuilder object */ public BlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = credential; this.sharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the credential used to authorize requests sent to the service * @param credential authorization credential * @return the updated BlobClientBuilder object */ public BlobClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = credential; this.sharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Clears the credential used to authorize requests sent to the service * @return the updated BlobClientBuilder object */ public BlobClientBuilder anonymousCredential() { this.sharedKeyCredential = null; this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the connection string for the service, parses it for authentication information (account name, account key) * @param connectionString connection string from access keys section * @return the updated BlobClientBuilder object */ public BlobClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); Map<String, String> connectionKVPs = new HashMap<>(); for (String s : connectionString.split(";")) { String[] kvp 
= s.split("=", 2); connectionKVPs.put(kvp[0].toLowerCase(), kvp[1]); } String accountName = connectionKVPs.get(ACCOUNT_NAME); String accountKey = connectionKVPs.get(ACCOUNT_KEY); String endpointProtocol = connectionKVPs.get(ENDPOINT_PROTOCOL); String endpointSuffix = connectionKVPs.get(ENDPOINT_SUFFIX); if (ImplUtils.isNullOrEmpty(accountName) || ImplUtils.isNullOrEmpty(accountKey)) { throw new IllegalArgumentException("Connection string must contain 'AccountName' and 'AccountKey'."); } if (!ImplUtils.isNullOrEmpty(endpointProtocol) && !ImplUtils.isNullOrEmpty(endpointSuffix)) { String endpoint = String.format("%s: endpoint(endpoint); } return credential(new SharedKeyCredential(accountName, accountKey)); } /** * Sets the http client used to send service requests * @param httpClient http client to send requests * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpClient(HttpClient httpClient) { this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent * @param pipelinePolicy a pipeline policy * @return the updated BlobClientBuilder object */ public BlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for service requests * @param logLevel logging level * @return the updated BlobClientBuilder object */ public BlobClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the configuration object used to retrieve environment configuration values used to buildClient the client with * when they are not set in the appendBlobClientBuilder, defaults to Configuration.NONE * @param configuration configuration store * @return the updated BlobClientBuilder object */ public BlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the 
client. * @param retryOptions the options to configure retry behaviors * @return the updated BlobClientBuilder object */ public BlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } }
You'd want to keep only the first path segment, not strip just the last one. Blob names can contain forward slashes. Example: `https://myaccount.blob.core.windows.net/containername/blobname/stillblobname/evenMoreOfTheSameBlobNameContainingSlashes`. Unless I'm wrong about what this utility method does, in which case I'd ask that it be renamed to better represent what it's doing.
/**
 * Initializes a {@link ContainerAsyncClient} pointing to the container this blob lives in.
 * This does not make a service call; it only derives the container URL and reuses the
 * existing pipeline.
 *
 * <p>Blob names may contain '/' characters, so the container URL is derived by keeping only
 * the FIRST path segment of the blob URL. Stripping just the last segment (as before) is
 * wrong for "container/dir/name"-style blob names.</p>
 *
 * @return a {@link ContainerAsyncClient} for the blob's parent container
 */
public ContainerAsyncClient getContainerAsyncClient() {
    URL blobUrl = getBlobUrl();
    String path = blobUrl.getPath().replaceAll("^/", "");
    String containerName = path.split("/", 2)[0];
    return new ContainerAsyncClient(new AzureBlobStorageBuilder()
        .url(blobUrl.getProtocol() + "://" + blobUrl.getAuthority() + "/" + containerName)
        .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()));
}
.url(Utility.stripLastPathSegment(getBlobUrl()).toString())
public ContainerAsyncClient getContainerAsyncClient() { try { BlobURLParts parts = URLParser.parse(getBlobUrl()); return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } catch (UnknownHostException e) { throw new RuntimeException(e); } }
class BlobAsyncClient { private static final long BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; private final String snapshot; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build()); this.snapshot = snapshot; } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource. 
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the containing this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { return new URL(blobAsyncRawClient.azureBlobStorage.url()); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. 
For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. 
* * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. 
* * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. */ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, null, false, null); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. 
*/ public Mono<Void> downloadToFile(String filePath, BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { AsynchronousFileChannel channel; try { channel = AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { return Mono.error(e); } return Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 0 : range.offset()))) .timeout(Duration.ofSeconds(300)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException)) .then() .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); } private List<BlobRange> sliceBlobRange(BlobRange blobRange) { long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE) { long count = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. 
* * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. * * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. 
For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot. 
*/ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. 
The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. 
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds. 
*/ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob. 
* @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
class BlobAsyncClient {
    private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;
    private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB;

    final BlobAsyncRawClient blobAsyncRawClient;

    /**
     * Package-private constructor for use by {@link BlobClientBuilder}.
     *
     * @param azureBlobStorageBuilder the API client builder for the blob storage API
     * @param snapshot the snapshot identifier of the blob, or {@code null} to target the base blob
     */
    BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) {
        this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build(), snapshot);
    }

    /**
     * Static method for getting a new builder for this class.
     *
     * @return a new {@link BlobClientBuilder} instance.
     */
    public static BlobClientBuilder blobClientBuilder() {
        return new BlobClientBuilder();
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs
     * that are known to be block blobs.
     *
     * @return a {@link BlockBlobAsyncClient} to this resource.
     */
    public BlockBlobAsyncClient asBlockBlobAsyncClient() {
        return new BlockBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs
     * that are known to be append blobs.
     *
     * @return an {@link AppendBlobAsyncClient} to this resource.
     */
    public AppendBlobAsyncClient asAppendBlobAsyncClient() {
        return new AppendBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot);
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs
     * that are known to be page blobs.
     *
     * @return a {@link PageBlobAsyncClient} to this resource.
     */
    public PageBlobAsyncClient asPageBlobAsyncClient() {
        return new PageBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot);
    }

    /**
     * Gets the URL of the blob represented by this client, including the {@code snapshot} query parameter when this
     * client targets a snapshot.
     *
     * @return the URL.
     */
    public URL getBlobUrl() {
        try {
            UrlBuilder urlBuilder = UrlBuilder.parse(blobAsyncRawClient.azureBlobStorage.url());
            if (blobAsyncRawClient.snapshot != null) {
                urlBuilder.query("snapshot=" + blobAsyncRawClient.snapshot);
            }
            return urlBuilder.toURL();
        } catch (MalformedURLException e) {
            // Fix: the original concatenated the class name into the format string
            // ("Invalid URL on %s: %s" + getClass().getSimpleName()) and passed only one argument for two
            // %s specifiers, which throws MissingFormatArgumentException instead of producing the message.
            throw new RuntimeException(String.format("Invalid URL on %s: %s",
                getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e);
        }
    }

    /**
     * Gets whether the blob this client represents exists in the cloud. A 404 from the service maps to
     * {@code false}; any other error propagates.
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Mono<Response<Boolean>> exists() {
        return this.getProperties()
            .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
            .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> {
                HttpResponse response = ((StorageException) t).response();
                return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(),
                    response.headers(), false));
            });
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * @param sourceURL the source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> startCopyFromURL(URL sourceURL) {
        return this.startCopyFromURL(sourceURL, null, null, null);
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * @param sourceURL the source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata {@link Metadata}
     * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source.
     * @param destAccessConditions {@link BlobAccessConditions} against the destination.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata,
            ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
        return blobAsyncRawClient
            .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * @param copyId the id of the copy operation to abort, as returned by {@code startCopyFromURL}.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> abortCopyFromURL(String copyId) {
        return this.abortCopyFromURL(copyId, null);
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * @param copyId the id of the copy operation to abort, as returned by {@code startCopyFromURL}.
     * @param leaseAccessConditions fails the request if the provided lease does not match the active lease.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) {
        return blobAsyncRawClient
            .abortCopyFromURL(copyId, leaseAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     *
     * @param copySource the source URL to copy from.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> copyFromURL(URL copySource) {
        return this.copyFromURL(copySource, null, null, null);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     *
     * @param copySource the source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata {@link Metadata}
     * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source.
     * @param destAccessConditions {@link BlobAccessConditions} against the destination.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata,
            ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
        return blobAsyncRawClient
            .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
    }

    /**
     * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @return a reactive response containing the blob data.
     */
    public Mono<Response<Flux<ByteBuffer>>> download() {
        return this.download(null, null, false, null);
    }

    /**
     * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param range {@link BlobRange}
     * @param accessConditions {@link BlobAccessConditions}
     * @param rangeGetContentMD5 whether the contentMD5 for the specified blob range should be returned.
     * @param options {@link ReliableDownloadOptions}
     * @return a reactive response containing the blob data.
     */
    public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions,
            boolean rangeGetContentMD5, ReliableDownloadOptions options) {
        return blobAsyncRawClient
            .download(range, accessConditions, rangeGetContentMD5)
            .map(response -> new SimpleResponse<>(
                response.rawResponse(),
                // Empty blobs still complete the stream with a single empty buffer.
                response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0)))));
    }

    /**
     * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist.
     * <p>
     * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call,
     * use the other overload providing the {@link BlobRange} parameter.
     *
     * @param filePath path of the file to which the downloaded data will be written; created if it doesn't exist.
     *        (Fix: the original javadoc incorrectly described this as an {@code OutputStream}.)
     * @return a reactive signal completing when the download has been written to the file.
     */
    public Mono<Void> downloadToFile(String filePath) {
        return this.downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, false, null);
    }

    /**
     * Downloads a range of bytes from a blob into a file specified by the path. The file will be created if it
     * doesn't exist.
     * <p>
     * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call,
     * provide the {@link BlobRange} parameter.
     *
     * @param filePath path of the file to which the downloaded data will be written; created if it doesn't exist.
     * @param range {@link BlobRange}
     * @param blockSize the size of a chunk to download at a time, in bytes; may be {@code null} to use the default.
     * @param accessConditions {@link BlobAccessConditions}
     * @param rangeGetContentMD5 whether the contentMD5 for the specified blob range should be returned.
     * @param options {@link ReliableDownloadOptions}
     * @return a reactive signal completing when the download has been written to the file.
     * @throws IllegalArgumentException when {@code blockSize} is negative or exceeds 100MB.
     */
    public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize,
            BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) {
        // Fix: the original compared blockSize < 0 before any null check, throwing NullPointerException for the
        // null value that sliceBlobRange explicitly supports (null falls back to the default block size).
        if (blockSize != null && (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE)) {
            throw new IllegalArgumentException("Block size should be between 0 and 100MB");
        }
        return Mono.using(
            () -> {
                try {
                    return AsynchronousFileChannel.open(Paths.get(filePath),
                        StandardOpenOption.READ, StandardOpenOption.WRITE);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            },
            channel -> Mono.justOrEmpty(range)
                // No explicit range: resolve the full blob range (costs a getProperties call).
                .switchIfEmpty(getFullBlobRange(accessConditions))
                .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize)))
                .flatMap(chunk -> blobAsyncRawClient
                    .download(chunk, accessConditions, rangeGetContentMD5)
                    .subscribeOn(Schedulers.elastic())
                    // File offset is relative to the start of the requested range.
                    .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel,
                        chunk.offset() - (range == null ? 0 : range.offset()))))
                .then(),
            channel -> {
                try {
                    channel.close();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
    }

    // NOTE(review): issues a getProperties call solely to learn the blob size, which doubles the I/O for
    // whole-blob downloads — presumably this should adopt the v11 approach instead; TODO confirm.
    private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) {
        return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
    }

    // Splits a blob range into consecutive chunks of at most blockSize bytes; the final chunk is trimmed to the
    // range's end. A null blockSize falls back to the default.
    private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) {
        if (blockSize == null) {
            blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE;
        }
        long offset = blobRange.offset();
        long length = blobRange.count();
        List<BlobRange> chunks = new ArrayList<>();
        for (long pos = offset; pos < offset + length; pos += blockSize) {
            long count = blockSize;
            if (pos + count > offset + length) {
                count = offset + length - pos;
            }
            chunks.add(new BlobRange(pos, count));
        }
        return chunks;
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> delete() {
        return this.delete(null, null);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * @param deleteBlobSnapshotOptions specifies the behavior for deleting the snapshots on this blob.
     *        {@code Include} deletes the base blob and all snapshots; {@code Only} deletes only the snapshots.
     *        If a snapshot is being deleted, you must pass null.
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
            BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .delete(deleteBlobSnapshotOptions, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @return a reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getProperties() {
        return this.getProperties(null);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .getProperties(accessConditions)
            .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders())));
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
     * In order to preserve existing values, they must be passed alongside the header being changed.
     *
     * @param headers {@link BlobHTTPHeaders}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) {
        return this.setHTTPHeaders(headers, null);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will all be erased.
     * In order to preserve existing values, they must be passed alongside the header being changed.
     *
     * @param headers {@link BlobHTTPHeaders}
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .setHTTPHeaders(headers, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old
     * values must be preserved, they must be downloaded and included in the call to this method.
     *
     * @param metadata {@link Metadata}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata) {
        return this.setMetadata(metadata, null);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata. If old
     * values must be preserved, they must be downloaded and included in the call to this method.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .setMetadata(metadata, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Creates a read-only snapshot of a blob.
     *
     * @return a reactive response containing the ID of the new snapshot.
     */
    public Mono<Response<String>> createSnapshot() {
        return this.createSnapshot(null, null);
    }

    /**
     * Creates a read-only snapshot of a blob.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response containing the ID of the new snapshot.
     */
    public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .createSnapshot(metadata, accessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot()));
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
     * in a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth
     * of the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's
     * etag.
     *
     * @param tier the new tier for the blob.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setTier(AccessTier tier) {
        return this.setTier(tier, null);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob
     * in a blob storage or GPV2 account. This does not update the blob's etag.
     *
     * @param tier the new tier for the blob.
     * @param leaseAccessConditions fails the request if the provided lease does not match the active lease.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) {
        return blobAsyncRawClient
            .setTier(tier, leaseAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted
     * snapshots.
     *
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> undelete() {
        return blobAsyncRawClient
            .undelete()
            .map(VoidResponse::new);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 and 60
     * seconds, or infinite (-1).
     *
     * @param proposedId a {@code String} in any valid GUID format. May be null.
     * @param duration the duration of the lease, in seconds, or -1 for a lease that never expires.
     * @return a reactive response containing the lease ID.
     */
    public Mono<Response<String>> acquireLease(String proposedId, int duration) {
        return this.acquireLease(proposedId, duration, null);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 and 60
     * seconds, or infinite (-1).
     *
     * @param proposedID a {@code String} in any valid GUID format. May be null.
     * @param duration the duration of the lease, in seconds, or -1 for a lease that never expires.
     * @param modifiedAccessConditions fails the request if the specified ETag/LastModifiedTime condition is not met.
     * @return a reactive response containing the lease ID.
     */
    public Mono<Response<String>> acquireLease(String proposedID, int duration,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .acquireLease(proposedID, duration, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @return a reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID) {
        return this.renewLease(leaseID, null);
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @param modifiedAccessConditions fails the request if the specified ETag/LastModifiedTime condition is not met.
     * @return a reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .renewLease(leaseID, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID) {
        return this.releaseLease(leaseID, null);
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @param modifiedAccessConditions fails the request if the specified ETag/LastModifiedTime condition is not met.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .releaseLease(leaseID, modifiedAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
     * constant to break a fixed-duration lease when it expires or an infinite lease immediately.
     *
     * @return a reactive response containing the remaining time in the broken lease in seconds.
     */
    public Mono<Response<Integer>> breakLease() {
        return this.breakLease(null, null);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
     * constant to break a fixed-duration lease when it expires or an infinite lease immediately.
     *
     * @param breakPeriodInSeconds an optional {@code Integer} representing the proposed duration of seconds that the
     *        lease should continue before it is broken, between 0 and 60 seconds. Only used if shorter than the time
     *        remaining on the lease.
     * @param modifiedAccessConditions fails the request if the specified ETag/LastModifiedTime condition is not met.
     * @return a reactive response containing the remaining time in the broken lease in seconds.
     */
    public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .breakLease(breakPeriodInSeconds, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime()));
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId the leaseId of the active lease on the blob.
     * @param proposedID a {@code String} in any valid GUID format.
     * @return a reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID) {
        return this.changeLease(leaseId, proposedID, null);
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId the leaseId of the active lease on the blob.
     * @param proposedID a {@code String} in any valid GUID format.
     * @param modifiedAccessConditions fails the request if the specified ETag/LastModifiedTime condition is not met.
     * @return a reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .changeLease(leaseId, proposedID, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Returns the sku name and account kind for the account.
     *
     * @return a reactor response containing the sku name and account kind.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfo() {
        return blobAsyncRawClient
            .getAccountInfo()
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }
}
We can't afford an extra getProperties call just to get the size — it potentially doubles the number of I/O operations. Is there a reason we didn't just pull the implementation from v11?
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
class BlobAsyncClient { private static final long BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; private final String snapshot; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build()); this.snapshot = snapshot; } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource. 
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the containing this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(Utility.stripLastPathSegment(getBlobUrl()).toString()) .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { return new URL(blobAsyncRawClient.azureBlobStorage.url()); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. 
* * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. 
Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. 
*/ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. 
*/ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, null, false, null); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. */ public Mono<Void> downloadToFile(String filePath, BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { AsynchronousFileChannel channel; try { channel = AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { return Mono.error(e); } return Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 
0 : range.offset()))) .timeout(Duration.ofSeconds(300)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException)) .then() .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private List<BlobRange> sliceBlobRange(BlobRange blobRange) { long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE) { long count = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. 
* * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. 
The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. 
A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. 
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. 
*/ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. 
ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. 
For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build(), snapshot); } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource. 
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { try { BlobURLParts parts = URLParser.parse(getBlobUrl()); return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } catch (UnknownHostException e) { throw new RuntimeException(e); } } /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { UrlBuilder urlBuilder = UrlBuilder.parse(blobAsyncRawClient.azureBlobStorage.url()); if (blobAsyncRawClient.snapshot != null) { urlBuilder.query("snapshot=" + blobAsyncRawClient.snapshot); } return urlBuilder.toURL(); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud. 
* * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. 
*/ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. 
* * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data. 
*/ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * use the other overload providing the {@link BlobRange} parameter. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. */ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, false, null); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * provide the {@link BlobRange} parameter. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range * {@link BlobRange} * @param blockSize * the size of a chunk to download at a time, in bytes * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. 
*/ public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { if (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE) { throw new IllegalArgumentException("Block size should not exceed 100MB"); } return Mono.using(() -> { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw new UncheckedIOException(e); } }, channel -> Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 0 : range.offset())))) .then(), channel -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) { if (blockSize == null) { blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; } long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += blockSize) { long count = blockSize; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob. 
{@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. * * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. 
For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot. 
*/ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. 
The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. 
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds. 
*/ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob. 
* @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
This also does not support etag locking or progress reporting.
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
/**
 * Async client to a blob of any type (block, append, or page) in an Azure Storage container.
 * Wraps a {@link BlobAsyncRawClient} and exposes reactive, Response-typed operations:
 * copy, download (including chunked download-to-file), delete, properties/metadata,
 * snapshots, tiering, and leasing. Instances are created through {@link BlobClientBuilder}.
 */
class BlobAsyncClient {
    // Size of each chunk requested by downloadToFile; chunks are fetched concurrently.
    private static final long BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;

    final BlobAsyncRawClient blobAsyncRawClient;
    private final String snapshot;

    /**
     * Package-private constructor for use by {@link BlobClientBuilder}.
     *
     * @param azureBlobStorageBuilder the API client builder for the blob storage API
     * @param snapshot the snapshot identifier this client targets, or null for the base blob
     */
    BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) {
        this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build());
        this.snapshot = snapshot;
    }

    /**
     * Static method for getting a new builder for this class.
     *
     * @return A new {@link BlobClientBuilder} instance.
     */
    public static BlobClientBuilder blobClientBuilder() {
        return new BlobClientBuilder();
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations.
     * Only do this for blobs that are known to be block blobs.
     *
     * @return A {@link BlockBlobAsyncClient} to this resource.
     */
    public BlockBlobAsyncClient asBlockBlobAsyncClient() {
        return new BlockBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations.
     * Only do this for blobs that are known to be append blobs.
     *
     * @return An {@link AppendBlobAsyncClient} to this resource.
     */
    public AppendBlobAsyncClient asAppendBlobAsyncClient() {
        return new AppendBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()));
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations.
     * Only do this for blobs that are known to be page blobs.
     *
     * @return A {@link PageBlobAsyncClient} to this resource.
     */
    public PageBlobAsyncClient asPageBlobAsyncClient() {
        return new PageBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot);
    }

    /**
     * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in.
     * This method does not create a container; it simply constructs the URL to the container and
     * offers access to methods relevant to containers.
     *
     * @return A {@link ContainerAsyncClient} object pointing to the container containing the blob.
     */
    public ContainerAsyncClient getContainerAsyncClient() {
        return new ContainerAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.stripLastPathSegment(getBlobUrl()).toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()));
    }

    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException if the stored URL is malformed (should not happen for a
     *         correctly-built client).
     */
    public URL getBlobUrl() {
        try {
            return new URL(blobAsyncRawClient.azureBlobStorage.url());
        } catch (MalformedURLException e) {
            // FIX: the format string previously concatenated the class name into the pattern
            // ("...%s: %s" + name) while supplying only one argument, which made this error
            // path throw MissingFormatArgumentException instead of the intended RuntimeException.
            throw new RuntimeException(String.format("Invalid URL on %s: %s",
                getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e);
        }
    }

    /**
     * Gets if the blob this client represents exists in the cloud.
     * A 404 from the service is mapped to a successful response carrying {@code false};
     * all other errors propagate.
     *
     * @return true if the blob exists, false if it doesn't.
     */
    public Mono<Response<Boolean>> exists() {
        return this.getProperties()
            .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
            .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> {
                HttpResponse response = ((StorageException) t).response();
                return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(),
                    response.headers(), false));
            });
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @return A reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> startCopyFromURL(URL sourceURL) {
        return this.startCopyFromURL(sourceURL, null, null, null);
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * @param sourceURL The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata {@link Metadata}
     * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source.
     *        The request fails if the specified condition is not satisfied.
     * @param destAccessConditions {@link BlobAccessConditions} against the destination.
     * @return A reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata,
            ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
        return blobAsyncRawClient
            .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length
     * and metadata.
     *
     * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on
     *        the {@link BlobStartCopyFromURLHeaders} object.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> abortCopyFromURL(String copyId) {
        return this.abortCopyFromURL(copyId, null);
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length
     * and metadata.
     *
     * @param copyId The id of the copy operation to abort. Returned as the {@code copyId} field on
     *        the {@link BlobStartCopyFromURLHeaders} object.
     * @param leaseAccessConditions By setting lease access conditions, requests will fail if the
     *        provided lease does not match the active lease on the blob.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) {
        return blobAsyncRawClient
            .abortCopyFromURL(copyId, leaseAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before
     * returning a response.
     *
     * @param copySource The source URL to copy from.
     * @return A reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> copyFromURL(URL copySource) {
        return this.copyFromURL(copySource, null, null, null);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before
     * returning a response.
     *
     * @param copySource The source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata {@link Metadata}
     * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source.
     *        The request fails if the specified condition is not satisfied.
     * @param destAccessConditions {@link BlobAccessConditions} against the destination.
     * @return A reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata,
            ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
        return blobAsyncRawClient
            .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
    }

    /**
     * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @return A reactive response containing the blob data.
     */
    public Mono<Response<Flux<ByteBuffer>>> download() {
        return this.download(null, null, false, null);
    }

    /**
     * Reads a range of bytes from a blob. Uploading data must be done from the
     * {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param range {@link BlobRange}
     * @param accessConditions {@link BlobAccessConditions}
     * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
     * @param options {@link ReliableDownloadOptions} governing retries of the body stream.
     * @return A reactive response containing the blob data.
     */
    public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions,
            boolean rangeGetContentMD5, ReliableDownloadOptions options) {
        return blobAsyncRawClient
            .download(range, accessConditions, rangeGetContentMD5)
            .map(response -> new SimpleResponse<>(
                response.rawResponse(),
                // An empty body still completes with a single empty buffer rather than no signal.
                response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0)))));
    }

    /**
     * Downloads the entire blob into a file specified by the path. The file will be created if it
     * doesn't exist. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param filePath A non-null path to the file where the downloaded data will be written.
     * @return A reactive signal completing when the download has finished.
     */
    public Mono<Void> downloadToFile(String filePath) {
        return this.downloadToFile(filePath, null, null, false, null);
    }

    /**
     * Downloads a range of bytes from the blob into a file specified by the path. The file will be
     * created if it doesn't exist. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param filePath A non-null path to the file where the downloaded data will be written.
     * @param range {@link BlobRange}; null means the whole blob (resolved via getProperties).
     * @param accessConditions {@link BlobAccessConditions}
     * @param rangeGetContentMD5 Whether the contentMD5 for the specified blob range should be returned.
     * @param options {@link ReliableDownloadOptions} governing retries of each chunk's body stream.
     * @return A reactive signal completing when the download has finished.
     */
    public Mono<Void> downloadToFile(String filePath, BlobRange range, BlobAccessConditions accessConditions,
            boolean rangeGetContentMD5, ReliableDownloadOptions options) {
        AsynchronousFileChannel channel;
        try {
            // FIX: CREATE is required so a missing file is created, as this method's contract
            // promises; with only READ+WRITE the open threw NoSuchFileException.
            channel = AsynchronousFileChannel.open(Paths.get(filePath),
                StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
        } catch (IOException e) {
            return Mono.error(e);
        }
        return Mono.justOrEmpty(range)
            .switchIfEmpty(getFullBlobRange(accessConditions))
            .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg)))
            .flatMap(chunk -> blobAsyncRawClient
                .download(chunk, accessConditions, rangeGetContentMD5)
                .subscribeOn(Schedulers.elastic())
                // Each chunk is written at its position relative to the start of the requested range.
                .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel,
                    chunk.offset() - (range == null ? 0 : range.offset())))
                .timeout(Duration.ofSeconds(300))
                .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))
            .then()
            .doOnTerminate(() -> {
                try {
                    channel.close();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
    }

    /**
     * Resolves the full range of the blob (offset 0 to blob size) by querying its properties.
     * Used by downloadToFile when the caller passes a null range.
     *
     * @param accessConditions {@link BlobAccessConditions} applied to the properties request.
     * @return A reactive response emitting the blob's full {@link BlobRange}.
     */
    private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) {
        return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
    }

    /**
     * Splits a range into fixed-size chunks of BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE bytes, with the
     * final chunk trimmed to the remaining length.
     *
     * @param blobRange the full range to split; must have a non-null count.
     * @return the list of contiguous chunk ranges covering {@code blobRange}.
     */
    private List<BlobRange> sliceBlobRange(BlobRange blobRange) {
        long offset = blobRange.offset();
        long length = blobRange.count();
        List<BlobRange> chunks = new ArrayList<>();
        for (long pos = offset; pos < offset + length; pos += BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE) {
            long count = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE;
            if (pos + count > offset + length) {
                count = offset + length - pos;
            }
            chunks.add(new BlobRange(pos, count));
        }
        return chunks;
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its
     * snapshots.
     *
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> delete() {
        return this.delete(null, null);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its
     * snapshots.
     *
     * @param deleteBlobSnapshotOptions Specifies the behavior for deleting the snapshots on this
     *        blob. {@code Include} deletes the base blob and all snapshots; {@code Only} deletes
     *        only the snapshots. If a snapshot is being deleted, you must pass null.
     * @param accessConditions {@link BlobAccessConditions}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
            BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .delete(deleteBlobSnapshotOptions, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @return A reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getProperties() {
        return this.getProperties(null);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @param accessConditions {@link BlobAccessConditions}
     * @return A reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .getProperties(accessConditions)
            .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders())));
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will
     * all be erased. In order to preserve existing values, they must be passed alongside the header
     * being changed.
     *
     * @param headers {@link BlobHTTPHeaders}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) {
        return this.setHTTPHeaders(headers, null);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will
     * all be erased. In order to preserve existing values, they must be passed alongside the header
     * being changed.
     *
     * @param headers {@link BlobHTTPHeaders}
     * @param accessConditions {@link BlobAccessConditions}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .setHTTPHeaders(headers, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing
     * metadata. If old values must be preserved, they must be downloaded and included in the call
     * to this method.
     *
     * @param metadata {@link Metadata}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata) {
        return this.setMetadata(metadata, null);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing
     * metadata. If old values must be preserved, they must be downloaded and included in the call
     * to this method.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link BlobAccessConditions}
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .setMetadata(metadata, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Creates a read-only snapshot of a blob.
     *
     * @return A reactive response containing the ID of the new snapshot.
     */
    public Mono<Response<String>> createSnapshot() {
        return this.createSnapshot(null, null);
    }

    /**
     * Creates a read-only snapshot of a blob.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link BlobAccessConditions}
     * @return A reactive response containing the ID of the new snapshot.
     */
    public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .createSnapshot(metadata, accessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot()));
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
     * or a block blob in a blob storage or GPV2 account. A premium page blob's tier determines the
     * allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines the
     * Hot/Cool/Archive storage type. This does not update the blob's etag.
     *
     * @param tier The new tier for the blob.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setTier(AccessTier tier) {
        return this.setTier(tier, null);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
     * or a block blob in a blob storage or GPV2 account. This does not update the blob's etag.
     *
     * @param tier The new tier for the blob.
     * @param leaseAccessConditions By setting lease access conditions, requests will fail if the
     *        provided lease does not match the active lease on the blob.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) {
        return blobAsyncRawClient
            .setTier(tier, leaseAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated
     * soft-deleted snapshots.
     *
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> undelete() {
        return blobAsyncRawClient
            .undelete()
            .map(VoidResponse::new);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be
     * between 15 to 60 seconds, or infinite (-1).
     *
     * @param proposedId A {@code String} in any valid GUID format. May be null.
     * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that
     *        never expires. A non-infinite lease can be between 15 and 60 seconds.
     * @return A reactive response containing the lease ID.
     */
    public Mono<Response<String>> acquireLease(String proposedId, int duration) {
        return this.acquireLease(proposedId, duration, null);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be
     * between 15 to 60 seconds, or infinite (-1).
     *
     * @param proposedID A {@code String} in any valid GUID format. May be null.
     * @param duration The duration of the lease, in seconds, or negative one (-1) for a lease that
     *        never expires. A non-infinite lease can be between 15 and 60 seconds.
     * @param modifiedAccessConditions Standard HTTP access conditions; the request fails if the
     *        specified condition is not satisfied.
     * @return A reactive response containing the lease ID.
     */
    public Mono<Response<String>> acquireLease(String proposedID, int duration,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .acquireLease(proposedID, duration, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @return A reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID) {
        return this.renewLease(leaseID, null);
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @param modifiedAccessConditions Standard HTTP access conditions; the request fails if the
     *        specified condition is not satisfied.
     * @return A reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .renewLease(leaseID, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID) {
        return this.releaseLease(leaseID, null);
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID The leaseId of the active lease on the blob.
     * @param modifiedAccessConditions Standard HTTP access conditions; the request fails if the
     *        specified condition is not satisfied.
     * @return A reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .releaseLease(leaseID, modifiedAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the
     * LeaseBreakDefault (-1) constant to break a fixed-duration lease when it expires or an
     * infinite lease immediately.
     *
     * @return A reactive response containing the remaining time in the broken lease in seconds.
     */
    public Mono<Response<Integer>> breakLease() {
        return this.breakLease(null, null);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the
     * LeaseBreakDefault (-1) constant to break a fixed-duration lease when it expires or an
     * infinite lease immediately.
     *
     * @param breakPeriodInSeconds An optional {@code Integer} representing the proposed duration of
     *        seconds that the lease should continue before it is broken, between 0 and 60 seconds.
     *        Only used if it is shorter than the time remaining on the lease.
     * @param modifiedAccessConditions Standard HTTP access conditions; the request fails if the
     *        specified condition is not satisfied.
     * @return A reactive response containing the remaining time in the broken lease in seconds.
     */
    public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .breakLease(breakPeriodInSeconds, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime()));
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId The leaseId of the active lease on the blob.
     * @param proposedID A {@code String} in any valid GUID format.
     * @return A reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID) {
        return this.changeLease(leaseId, proposedID, null);
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId The leaseId of the active lease on the blob.
     * @param proposedID A {@code String} in any valid GUID format.
     * @param modifiedAccessConditions Standard HTTP access conditions; the request fails if the
     *        specified condition is not satisfied.
     * @return A reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .changeLease(leaseId, proposedID, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Returns the sku name and account kind for the account.
     *
     * @return A reactive response containing the sku name and account kind.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfo() {
        return blobAsyncRawClient
            .getAccountInfo()
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }
}
class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build(), snapshot); } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource. 
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { try { BlobURLParts parts = URLParser.parse(getBlobUrl()); return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } catch (UnknownHostException e) { throw new RuntimeException(e); } } /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { UrlBuilder urlBuilder = UrlBuilder.parse(blobAsyncRawClient.azureBlobStorage.url()); if (blobAsyncRawClient.snapshot != null) { urlBuilder.query("snapshot=" + blobAsyncRawClient.snapshot); } return urlBuilder.toURL(); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud. 
* * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. 
*/ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. 
* * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data. 
*/ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * use the other overload providing the {@link BlobRange} parameter. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. */ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, false, null); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * provide the {@link BlobRange} parameter. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range * {@link BlobRange} * @param blockSize * the size of a chunk to download at a time, in bytes * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. 
*/ public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { if (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE) { throw new IllegalArgumentException("Block size should not exceed 100MB"); } return Mono.using(() -> { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw new UncheckedIOException(e); } }, channel -> Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 0 : range.offset())))) .then(), channel -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) { if (blockSize == null) { blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; } long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += blockSize) { long count = blockSize; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob. 
{@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. * * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. 
For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot. 
*/ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. 
The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. 
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds. 
*/ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob. 
* @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
This is called only once per large-file download, so how would it double the number of I/O operations? For very small files the relative overhead is real, but without knowing the full blob length the client cannot parallelize the download work. The best way to avoid the extra call is to supply the `BlobRange` parameter explicitly — I'll document that here.
/**
 * Builds a {@link BlobRange} covering the entire blob by first fetching its current size
 * through a getProperties call. Used when the caller did not supply an explicit range.
 * Note: this incurs one extra HTTP round trip; callers that already know the desired
 * range should pass it directly instead.
 *
 * @param accessConditions {@link BlobAccessConditions} applied to the properties request.
 * @return a reactive response emitting a range from offset 0 through the full blob size.
 */
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) {
    return getProperties(accessConditions)
        .map(propertiesResponse -> new BlobRange(0, propertiesResponse.value().blobSize()));
}
return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
// Fetches the blob's total size via getProperties, then maps the result into a BlobRange
// starting at offset 0 that spans the whole blob. This costs one extra HTTP call, so it is
// only invoked when the caller did not provide an explicit BlobRange.
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
class BlobAsyncClient { private static final long BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; private final String snapshot; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build()); this.snapshot = snapshot; } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource. 
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the containing this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(Utility.stripLastPathSegment(getBlobUrl()).toString()) .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { return new URL(blobAsyncRawClient.azureBlobStorage.url()); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. 
* * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. 
Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. 
*/ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. 
*/ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, null, false, null); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. */ public Mono<Void> downloadToFile(String filePath, BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { AsynchronousFileChannel channel; try { channel = AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { return Mono.error(e); } return Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 
0 : range.offset()))) .timeout(Duration.ofSeconds(300)) .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException)) .then() .doOnTerminate(() -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private List<BlobRange> sliceBlobRange(BlobRange blobRange) { long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE) { long count = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob. {@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. 
* * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. 
The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. 
A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. 
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. 
*/ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. 
ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. 
For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
/**
 * Reactive client for a single blob resource. Wraps a {@link BlobAsyncRawClient} and maps its raw
 * responses into {@code Mono}/{@code Flux}-based {@code Response} objects. Exposes copy, download
 * (including chunked download-to-file), delete, property/metadata/tier updates, snapshot, lease,
 * and account-info operations, plus conversions to the block/append/page blob clients.
 * NOTE(review): many javadoc hrefs in this class appear truncated ("https:") — restore full URLs.
 */
class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build(), snapshot); } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource.
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { try { BlobURLParts parts = URLParser.parse(getBlobUrl()); /* NOTE(review): the format string below is truncated in this copy (unterminated literal) — verify the container URL construction against the original source. */ return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } catch (UnknownHostException e) { throw new RuntimeException(e); } } /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { UrlBuilder urlBuilder = UrlBuilder.parse(blobAsyncRawClient.azureBlobStorage.url()); if (blobAsyncRawClient.snapshot != null) { urlBuilder.query("snapshot=" + blobAsyncRawClient.snapshot); } return urlBuilder.toURL(); } catch (MalformedURLException e) { /* NOTE(review): "Invalid URL on %s: %s" has two placeholders but only one argument is supplied, and the class name is concatenated into the pattern instead of passed — formatting will throw MissingFormatArgumentException; likely meant String.format("Invalid URL on %s: %s", getClass().getSimpleName(), url). */ throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud.
* * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation.
*/ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
* * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data.
*/ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * use the other overload providing the {@link BlobRange} parameter. * * @param filePath * A non-null path to the file where the downloaded data will be written. */ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, false, null); } /** * Downloads a range of bytes of a blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * provide the {@link BlobRange} parameter. * * @param filePath * A non-null path to the file where the downloaded data will be written. * @param range * {@link BlobRange} * @param blockSize * the size of a chunk to download at a time, in bytes * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned.
*/ public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { /* NOTE(review): blockSize is an Integer and is unboxed by this comparison — a null blockSize (which sliceBlobRange below explicitly supports) would NPE here; consider applying the default before this check. */ if (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE) { throw new IllegalArgumentException("Block size should not exceed 100MB"); } return Mono.using(() -> { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw new UncheckedIOException(e); } }, channel -> Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 0 : range.offset())))) .then(), channel -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) { if (blockSize == null) { blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; } long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += blockSize) { long count = blockSize; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob.
{@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. * * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed.
For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot.
*/ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations.
The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob.
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds.
*/ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob.
* @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
I don't understand what ETag locking is, so please feel free to contribute an explanation or implementation. Also, what kind of progress reporting are you looking for?
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) { return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize())); }
/**
 * Async client for interacting with a single blob. Heavier blob-type-specific operations
 * (uploads) live on {@link BlockBlobAsyncClient}, {@link PageBlobAsyncClient}, and
 * {@link AppendBlobAsyncClient}; this client covers operations common to all blob types.
 */
class BlobAsyncClient {
    // Chunk size used when splitting a download into parallel ranged requests.
    private static final long BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB;

    final BlobAsyncRawClient blobAsyncRawClient;
    private final String snapshot;

    /**
     * Package-private constructor for use by {@link BlobClientBuilder}.
     *
     * @param azureBlobStorageBuilder the API client builder for blob storage API
     * @param snapshot the snapshot identifier targeted by this client, or null for the base blob
     */
    BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) {
        this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build());
        this.snapshot = snapshot;
    }

    /**
     * Static method for getting a new builder for this class.
     *
     * @return a new {@link BlobClientBuilder} instance.
     */
    public static BlobClientBuilder blobClientBuilder() {
        return new BlobClientBuilder();
    }

    /**
     * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations.
     * Only do this for blobs that are known to be block blobs.
     *
     * @return a {@link BlockBlobAsyncClient} to this resource.
     */
    public BlockBlobAsyncClient asBlockBlobAsyncClient() {
        return new BlockBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot);
    }

    /**
     * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations.
     * Only do this for blobs that are known to be append blobs.
     *
     * @return an {@link AppendBlobAsyncClient} to this resource.
     */
    public AppendBlobAsyncClient asAppendBlobAsyncClient() {
        // NOTE(review): unlike the block/page variants this does not forward the snapshot —
        // confirm whether AppendBlobAsyncClient is meant to support snapshots.
        return new AppendBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()));
    }

    /**
     * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations.
     * Only do this for blobs that are known to be page blobs.
     *
     * @return a {@link PageBlobAsyncClient} to this resource.
     */
    public PageBlobAsyncClient asPageBlobAsyncClient() {
        return new PageBlobAsyncClient(new AzureBlobStorageBuilder()
            .url(getBlobUrl().toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), snapshot);
    }

    /**
     * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in.
     * This method does not create a container; it simply constructs the URL to the container and
     * offers access to methods relevant to containers.
     *
     * @return a {@link ContainerAsyncClient} object pointing to the container containing the blob
     */
    public ContainerAsyncClient getContainerAsyncClient() {
        return new ContainerAsyncClient(new AzureBlobStorageBuilder()
            .url(Utility.stripLastPathSegment(getBlobUrl()).toString())
            .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()));
    }

    /**
     * Gets the URL of the blob represented by this client.
     *
     * @return the URL.
     * @throws RuntimeException if the underlying URL is malformed.
     */
    public URL getBlobUrl() {
        try {
            return new URL(blobAsyncRawClient.azureBlobStorage.url());
        } catch (MalformedURLException e) {
            // Fixed: the original concatenated the class name into the format string and supplied
            // only one argument for two %s placeholders, which would itself throw at runtime.
            throw new RuntimeException(String.format("Invalid URL on %s: %s",
                getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e);
        }
    }

    /**
     * Gets if the blob this client represents exists in the cloud.
     *
     * @return true if the blob exists, false if it doesn't
     */
    public Mono<Response<Boolean>> exists() {
        return this.getProperties()
            .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true))
            // A 404 means "does not exist" rather than an error; every other failure propagates.
            .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> {
                HttpResponse response = ((StorageException) t).response();
                return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(),
                    response.headers(), false));
            });
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * @param sourceURL the source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> startCopyFromURL(URL sourceURL) {
        return this.startCopyFromURL(sourceURL, null, null, null);
    }

    /**
     * Copies the data at the source URL to a blob.
     *
     * @param sourceURL the source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata {@link Metadata}
     * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source.
     *        The request will fail if the specified condition is not satisfied.
     * @param destAccessConditions {@link BlobAccessConditions} against the destination.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata,
            ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
        return blobAsyncRawClient
            .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * @param copyId the id of the copy operation to abort. Returned as the {@code copyId} field on the
     *        {@link BlobStartCopyFromURLHeaders} object.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> abortCopyFromURL(String copyId) {
        return this.abortCopyFromURL(copyId, null);
    }

    /**
     * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
     *
     * @param copyId the id of the copy operation to abort.
     * @param leaseAccessConditions by setting lease access conditions, requests will fail if the
     *        provided lease does not match the active lease on the blob.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) {
        return blobAsyncRawClient
            .abortCopyFromURL(copyId, leaseAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     *
     * @param copySource the source URL to copy from.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> copyFromURL(URL copySource) {
        return this.copyFromURL(copySource, null, null, null);
    }

    /**
     * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response.
     *
     * @param copySource the source URL to copy from. URLs outside of Azure may only be copied to block blobs.
     * @param metadata {@link Metadata}
     * @param sourceModifiedAccessConditions {@link ModifiedAccessConditions} against the source.
     *        The request will fail if the specified condition is not satisfied.
     * @param destAccessConditions {@link BlobAccessConditions} against the destination.
     * @return a reactive response containing the copy ID for the long running operation.
     */
    public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata,
            ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) {
        return blobAsyncRawClient
            .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId()));
    }

    /**
     * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @return a reactive response containing the blob data.
     */
    public Mono<Response<Flux<ByteBuffer>>> download() {
        return this.download(null, null, false, null);
    }

    /**
     * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param range {@link BlobRange}
     * @param accessConditions {@link BlobAccessConditions}
     * @param rangeGetContentMD5 whether the contentMD5 for the specified blob range should be returned.
     * @param options {@link ReliableDownloadOptions}
     * @return a reactive response containing the blob data.
     */
    public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions,
            boolean rangeGetContentMD5, ReliableDownloadOptions options) {
        return blobAsyncRawClient
            .download(range, accessConditions, rangeGetContentMD5)
            .map(response -> new SimpleResponse<>(
                response.rawResponse(),
                // An empty body is normalized to a single empty buffer so downstream consumers
                // always observe at least one element.
                response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0)))));
    }

    /**
     * Downloads the entire blob into a file specified by the path. The file will be created if it
     * doesn't exist. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param filePath a non-null path to the file where the downloaded data will be written.
     * @return a reactive signal of completion.
     */
    public Mono<Void> downloadToFile(String filePath) {
        return this.downloadToFile(filePath, null, null, false, null);
    }

    /**
     * Downloads a range of bytes from the blob into a file specified by the path. The file will be
     * created if it doesn't exist. Uploading data must be done from the {@link BlockBlobClient},
     * {@link PageBlobClient}, or {@link AppendBlobClient}.
     *
     * @param filePath a non-null path to the file where the downloaded data will be written.
     * @param range {@link BlobRange}
     * @param accessConditions {@link BlobAccessConditions}
     * @param rangeGetContentMD5 whether the contentMD5 for the specified blob range should be returned.
     * @param options {@link ReliableDownloadOptions}
     * @return a reactive signal of completion.
     */
    public Mono<Void> downloadToFile(String filePath, BlobRange range, BlobAccessConditions accessConditions,
            boolean rangeGetContentMD5, ReliableDownloadOptions options) {
        AsynchronousFileChannel channel;
        try {
            // Fixed: CREATE is required so the file is actually created when absent, as the
            // javadoc promises; the original READ+WRITE-only open failed with NoSuchFileException.
            channel = AsynchronousFileChannel.open(Paths.get(filePath),
                StandardOpenOption.READ, StandardOpenOption.WRITE, StandardOpenOption.CREATE);
        } catch (IOException e) {
            return Mono.error(e);
        }
        return Mono.justOrEmpty(range)
            // No explicit range means "the whole blob" — size is discovered via getProperties.
            .switchIfEmpty(getFullBlobRange(accessConditions))
            .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg)))
            .flatMap(chunk -> blobAsyncRawClient
                .download(chunk, accessConditions, rangeGetContentMD5)
                .subscribeOn(Schedulers.elastic())
                // Each chunk is written at its position relative to the requested range start.
                .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel,
                    chunk.offset() - (range == null ? 0 : range.offset())))
                .timeout(Duration.ofSeconds(300))
                .retry(3, throwable -> throwable instanceof IOException || throwable instanceof TimeoutException))
            .then()
            .doOnTerminate(() -> {
                try {
                    channel.close();
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
    }

    /**
     * Builds a {@link BlobRange} covering the entire blob by reading the blob's size from its properties.
     *
     * @param accessConditions {@link BlobAccessConditions}
     * @return a {@link Mono} emitting a range from offset 0 covering the whole blob.
     */
    private Mono<BlobRange> getFullBlobRange(BlobAccessConditions accessConditions) {
        return getProperties(accessConditions).map(rb -> new BlobRange(0, rb.value().blobSize()));
    }

    /**
     * Splits a range into fixed-size chunks for parallel download; the final chunk is truncated
     * to the end of the range.
     *
     * @param blobRange the full range to split.
     * @return the list of sub-ranges, in ascending offset order.
     */
    private List<BlobRange> sliceBlobRange(BlobRange blobRange) {
        long offset = blobRange.offset();
        long length = blobRange.count();
        List<BlobRange> chunks = new ArrayList<>();
        for (long pos = offset; pos < offset + length; pos += BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE) {
            long count = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE;
            if (pos + count > offset + length) {
                count = offset + length - pos;
            }
            chunks.add(new BlobRange(pos, count));
        }
        return chunks;
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> delete() {
        return this.delete(null, null);
    }

    /**
     * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots.
     *
     * @param deleteBlobSnapshotOptions specifies the behavior for deleting the snapshots on this blob.
     *        {@code Include} will delete the base blob and all snapshots. {@code Only} will delete only
     *        the snapshots. If a snapshot is being deleted, you must pass null.
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions,
            BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .delete(deleteBlobSnapshotOptions, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @return a reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getProperties() {
        return this.getProperties(null);
    }

    /**
     * Returns the blob's metadata and properties.
     *
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response containing the blob properties and metadata.
     */
    public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .getProperties(accessConditions)
            .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders())));
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will
     * all be erased. In order to preserve existing values, they must be passed alongside the header
     * being changed.
     *
     * @param headers {@link BlobHTTPHeaders}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) {
        return this.setHTTPHeaders(headers, null);
    }

    /**
     * Changes a blob's HTTP header properties. If only one HTTP header is updated, the others will
     * all be erased. In order to preserve existing values, they must be passed alongside the header
     * being changed.
     *
     * @param headers {@link BlobHTTPHeaders}
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .setHTTPHeaders(headers, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata.
     * If old values must be preserved, they must be downloaded and included in the call to this method.
     *
     * @param metadata {@link Metadata}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata) {
        return this.setMetadata(metadata, null);
    }

    /**
     * Changes a blob's metadata. The specified metadata in this method will replace existing metadata.
     * If old values must be preserved, they must be downloaded and included in the call to this method.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .setMetadata(metadata, accessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Creates a read-only snapshot of a blob.
     *
     * @return a reactive response containing the ID of the new snapshot.
     */
    public Mono<Response<String>> createSnapshot() {
        return this.createSnapshot(null, null);
    }

    /**
     * Creates a read-only snapshot of a blob.
     *
     * @param metadata {@link Metadata}
     * @param accessConditions {@link BlobAccessConditions}
     * @return a reactive response containing the ID of the new snapshot.
     */
    public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) {
        return blobAsyncRawClient
            .createSnapshot(metadata, accessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot()));
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
     * or a block blob in a blob storage or GPV2 account. A premium page blob's tier determines the
     * allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines the
     * Hot/Cool/Archive storage type. This does not update the blob's etag.
     *
     * @param tier the new tier for the blob.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setTier(AccessTier tier) {
        return this.setTier(tier, null);
    }

    /**
     * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
     * or a block blob in a blob storage or GPV2 account. This does not update the blob's etag.
     *
     * @param tier the new tier for the blob.
     * @param leaseAccessConditions by setting lease access conditions, requests will fail if the
     *        provided lease does not match the active lease on the blob.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) {
        return blobAsyncRawClient
            .setTier(tier, leaseAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * Undelete restores the content and metadata of a soft-deleted blob and/or any associated
     * soft-deleted snapshots.
     *
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> undelete() {
        return blobAsyncRawClient
            .undelete()
            .map(VoidResponse::new);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be
     * between 15 to 60 seconds, or infinite (-1).
     *
     * @param proposedId a {@code String} in any valid GUID format. May be null.
     * @param duration the duration of the lease, in seconds, or negative one (-1) for a lease that
     *        never expires. A non-infinite lease can be between 15 and 60 seconds.
     * @return a reactive response containing the lease ID.
     */
    public Mono<Response<String>> acquireLease(String proposedId, int duration) {
        return this.acquireLease(proposedId, duration, null);
    }

    /**
     * Acquires a lease on the blob for write and delete operations. The lease duration must be
     * between 15 to 60 seconds, or infinite (-1).
     *
     * @param proposedID a {@code String} in any valid GUID format. May be null.
     * @param duration the duration of the lease, in seconds, or negative one (-1) for a lease that
     *        never expires. A non-infinite lease can be between 15 and 60 seconds.
     * @param modifiedAccessConditions the request will fail if the specified condition is not satisfied.
     * @return a reactive response containing the lease ID.
     */
    public Mono<Response<String>> acquireLease(String proposedID, int duration,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .acquireLease(proposedID, duration, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @return a reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID) {
        return this.renewLease(leaseID, null);
    }

    /**
     * Renews the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @param modifiedAccessConditions the request will fail if the specified condition is not satisfied.
     * @return a reactive response containing the renewed lease ID.
     */
    public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .renewLease(leaseID, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID) {
        return this.releaseLease(leaseID, null);
    }

    /**
     * Releases the blob's previously-acquired lease.
     *
     * @param leaseID the leaseId of the active lease on the blob.
     * @param modifiedAccessConditions the request will fail if the specified condition is not satisfied.
     * @return a reactive response signalling completion.
     */
    public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .releaseLease(leaseID, modifiedAccessConditions)
            .map(VoidResponse::new);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the
     * LeaseBreakDefault (-1) constant to break a fixed-duration lease when it expires or an
     * infinite lease immediately.
     *
     * @return a reactive response containing the remaining time in the broken lease in seconds.
     */
    public Mono<Response<Integer>> breakLease() {
        return this.breakLease(null, null);
    }

    /**
     * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the
     * LeaseBreakDefault (-1) constant to break a fixed-duration lease when it expires or an
     * infinite lease immediately.
     *
     * @param breakPeriodInSeconds an optional {@code Integer} representing the proposed duration of
     *        seconds that the lease should continue before it is broken, between 0 and 60 seconds.
     *        This break period is only used if it is shorter than the time remaining on the lease.
     * @param modifiedAccessConditions the request will fail if the specified condition is not satisfied.
     * @return a reactive response containing the remaining time in the broken lease in seconds.
     */
    public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .breakLease(breakPeriodInSeconds, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime()));
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId the leaseId of the active lease on the blob.
     * @param proposedID a {@code String} in any valid GUID format.
     * @return a reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID) {
        return this.changeLease(leaseId, proposedID, null);
    }

    /**
     * ChangeLease changes the blob's lease ID.
     *
     * @param leaseId the leaseId of the active lease on the blob.
     * @param proposedID a {@code String} in any valid GUID format.
     * @param modifiedAccessConditions the request will fail if the specified condition is not satisfied.
     * @return a reactive response containing the new lease ID.
     */
    public Mono<Response<String>> changeLease(String leaseId, String proposedID,
            ModifiedAccessConditions modifiedAccessConditions) {
        return blobAsyncRawClient
            .changeLease(leaseId, proposedID, modifiedAccessConditions)
            .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId()));
    }

    /**
     * Returns the sku name and account kind for the account.
     *
     * @return a reactor response containing the sku name and account kind.
     */
    public Mono<Response<StorageAccountInfo>> getAccountInfo() {
        return blobAsyncRawClient
            .getAccountInfo()
            .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders())));
    }
}
class BlobAsyncClient { private static final int BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE = 4 * Constants.MB; private static final int BLOB_MAX_DOWNLOAD_BLOCK_SIZE = 100 * Constants.MB; final BlobAsyncRawClient blobAsyncRawClient; /** * Package-private constructor for use by {@link BlobClientBuilder}. * @param azureBlobStorageBuilder the API client builder for blob storage API */ BlobAsyncClient(AzureBlobStorageBuilder azureBlobStorageBuilder, String snapshot) { this.blobAsyncRawClient = new BlobAsyncRawClient(azureBlobStorageBuilder.build(), snapshot); } /** * Static method for getting a new builder for this class. * * @return * A new {@link BlobClientBuilder} instance. */ public static BlobClientBuilder blobClientBuilder() { return new BlobClientBuilder(); } /** * Creates a new {@link BlockBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be block blobs. * * @return * A {@link BlockBlobAsyncClient} to this resource. */ public BlockBlobAsyncClient asBlockBlobAsyncClient() { return new BlockBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link AppendBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be append blobs. * * @return * A {@link AppendBlobAsyncClient} to this resource. */ public AppendBlobAsyncClient asAppendBlobAsyncClient() { return new AppendBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Creates a new {@link PageBlobAsyncClient} to this resource, maintaining configurations. Only do this for blobs * that are known to be page blobs. * * @return * A {@link PageBlobAsyncClient} to this resource. 
*/ public PageBlobAsyncClient asPageBlobAsyncClient() { return new PageBlobAsyncClient(new AzureBlobStorageBuilder().url(getBlobUrl().toString()).pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline()), blobAsyncRawClient.snapshot); } /** * Initializes a {@link ContainerAsyncClient} object pointing to the container this blob is in. This method does * not create a container. It simply constructs the URL to the container and offers access to methods relevant to * containers. * * @return * A {@link ContainerAsyncClient} object pointing to the container containing the blob */ public ContainerAsyncClient getContainerAsyncClient() { try { BlobURLParts parts = URLParser.parse(getBlobUrl()); return new ContainerAsyncClient(new AzureBlobStorageBuilder() .url(String.format("%s: .pipeline(blobAsyncRawClient.azureBlobStorage.httpPipeline())); } catch (UnknownHostException e) { throw new RuntimeException(e); } } /** * Gets the URL of the blob represented by this client. * @return the URL. */ public URL getBlobUrl() { try { UrlBuilder urlBuilder = UrlBuilder.parse(blobAsyncRawClient.azureBlobStorage.url()); if (blobAsyncRawClient.snapshot != null) { urlBuilder.query("snapshot=" + blobAsyncRawClient.snapshot); } return urlBuilder.toURL(); } catch (MalformedURLException e) { throw new RuntimeException(String.format("Invalid URL on %s: %s" + getClass().getSimpleName(), blobAsyncRawClient.azureBlobStorage.url()), e); } } /** * Gets if the blob this client represents exists in the cloud. 
* * @return * true if the blob exists, false if it doesn't */ public Mono<Response<Boolean>> exists() { return this.getProperties() .map(cp -> (Response<Boolean>) new SimpleResponse<>(cp, true)) .onErrorResume(t -> t instanceof StorageException && ((StorageException) t).statusCode() == 404, t -> { HttpResponse response = ((StorageException) t).response(); return Mono.just(new SimpleResponse<>(response.request(), response.statusCode(), response.headers(), false)); }); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> startCopyFromURL(URL sourceURL) { return this.startCopyFromURL(sourceURL, null, null, null); } /** * Copies the data at the source URL to a blob. For more information, see the <a * * href="https: * * @param sourceURL * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. 
*/ public Mono<Response<String>> startCopyFromURL(URL sourceURL, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .startCopyFromURL(sourceURL, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId) { return this.abortCopyFromURL(copyId, null); } /** * Stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. * * @param copyId * The id of the copy operation to abort. Returned as the {@code copyId} field on the {@link * BlobStartCopyFromURLHeaders} object. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> abortCopyFromURL(String copyId, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .abortCopyFromURL(copyId, leaseAccessConditions) .map(VoidResponse::new); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. * * @param copySource * The source URL to copy from. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource) { return this.copyFromURL(copySource, null, null, null); } /** * Copies the data at the source URL to a blob and waits for the copy to complete before returning a response. 
* * @param copySource * The source URL to copy from. URLs outside of Azure may only be copied to block blobs. * @param metadata * {@link Metadata} * @param sourceModifiedAccessConditions * {@link ModifiedAccessConditions} against the source. Standard HTTP Access conditions related to the * modification of data. ETag and LastModifiedTime are used to construct conditions related to when the blob * was changed relative to the given request. The request will fail if the specified condition is not * satisfied. * @param destAccessConditions * {@link BlobAccessConditions} against the destination. * * @return * A reactive response containing the copy ID for the long running operation. */ public Mono<Response<String>> copyFromURL(URL copySource, Metadata metadata, ModifiedAccessConditions sourceModifiedAccessConditions, BlobAccessConditions destAccessConditions) { return blobAsyncRawClient .syncCopyFromURL(copySource, metadata, sourceModifiedAccessConditions, destAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().copyId())); } /** * Reads the entire blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @return * A reactive response containing the blob data. */ public Mono<Response<Flux<ByteBuffer>>> download() { return this.download(null, null, false, null); } /** * Reads a range of bytes from a blob. Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * * @param range * {@link BlobRange} * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. * * @return * A reactive response containing the blob data. 
*/ public Mono<Response<Flux<ByteBuffer>>> download(BlobRange range, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { return blobAsyncRawClient .download(range, accessConditions, rangeGetContentMD5) .map(response -> new SimpleResponse<>( response.rawResponse(), response.body(options).map(ByteBuf::nioBuffer).switchIfEmpty(Flux.just(ByteBuffer.allocate(0))))); } /** * Downloads the entire blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * use the other overload providing the {@link BlobRange} parameter. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. */ public Mono<Void> downloadToFile(String filePath) { return this.downloadToFile(filePath, null, BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE, null, false, null); } /** * Downloads a range of bytes blob into a file specified by the path. The file will be created if it doesn't exist. * Uploading data must be done from the {@link BlockBlobClient}, {@link PageBlobClient}, or {@link AppendBlobClient}. * <p> * This method makes an extra HTTP call to get the length of the blob in the beginning. To avoid this extra call, * provide the {@link BlobRange} parameter. * * @param filePath * A non-null {@link OutputStream} instance where the downloaded data will be written. * @param range * {@link BlobRange} * @param blockSize * the size of a chunk to download at a time, in bytes * @param accessConditions * {@link BlobAccessConditions} * @param rangeGetContentMD5 * Whether the contentMD5 for the specified blob range should be returned. 
*/ public Mono<Void> downloadToFile(String filePath, BlobRange range, Integer blockSize, BlobAccessConditions accessConditions, boolean rangeGetContentMD5, ReliableDownloadOptions options) { if (blockSize < 0 || blockSize > BLOB_MAX_DOWNLOAD_BLOCK_SIZE) { throw new IllegalArgumentException("Block size should not exceed 100MB"); } return Mono.using(() -> { try { return AsynchronousFileChannel.open(Paths.get(filePath), StandardOpenOption.READ, StandardOpenOption.WRITE); } catch (IOException e) { throw new UncheckedIOException(e); } }, channel -> Mono.justOrEmpty(range) .switchIfEmpty(getFullBlobRange(accessConditions)) .flatMapMany(rg -> Flux.fromIterable(sliceBlobRange(rg, blockSize))) .flatMap(chunk -> blobAsyncRawClient .download(chunk, accessConditions, rangeGetContentMD5) .subscribeOn(Schedulers.elastic()) .flatMap(dar -> FluxUtil.bytebufStreamToFile(dar.body(options), channel, chunk.offset() - (range == null ? 0 : range.offset())))) .then(), channel -> { try { channel.close(); } catch (IOException e) { throw new UncheckedIOException(e); } }); } private List<BlobRange> sliceBlobRange(BlobRange blobRange, Integer blockSize) { if (blockSize == null) { blockSize = BLOB_DEFAULT_DOWNLOAD_BLOCK_SIZE; } long offset = blobRange.offset(); long length = blobRange.count(); List<BlobRange> chunks = new ArrayList<>(); for (long pos = offset; pos < offset + length; pos += blockSize) { long count = blockSize; if (pos + count > offset + length) { count = offset + length - pos; } chunks.add(new BlobRange(pos, count)); } return chunks; } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete() { return this.delete(null, null); } /** * Deletes the specified blob or snapshot. Note that deleting a blob also deletes all its snapshots. * * @param deleteBlobSnapshotOptions * Specifies the behavior for deleting the snapshots on this blob. 
{@code Include} will delete the base blob * and all snapshots. {@code Only} will delete only the snapshots. If a snapshot is being deleted, you must * pass null. * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> delete(DeleteSnapshotsOptionType deleteBlobSnapshotOptions, BlobAccessConditions accessConditions) { return blobAsyncRawClient .delete(deleteBlobSnapshotOptions, accessConditions) .map(VoidResponse::new); } /** * Returns the blob's metadata and properties. * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties() { return this.getProperties(null); } /** * Returns the blob's metadata and properties. * * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the blob properties and metadata. */ public Mono<Response<BlobProperties>> getProperties(BlobAccessConditions accessConditions) { return blobAsyncRawClient .getProperties(accessConditions) .map(rb -> new SimpleResponse<>(rb, new BlobProperties(rb.deserializedHeaders()))); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers) { return this.setHTTPHeaders(headers, null); } /** * Changes a blob's HTTP header properties. if only one HTTP header is updated, the * others will all be erased. In order to preserve existing values, they must be * passed alongside the header being changed. 
For more information, see the * <a href="https: * * @param headers * {@link BlobHTTPHeaders} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setHTTPHeaders(BlobHTTPHeaders headers, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setHTTPHeaders(headers, accessConditions) .map(VoidResponse::new); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata) { return this.setMetadata(metadata, null); } /** * Changes a blob's metadata. The specified metadata in this method will replace existing * metadata. If old values must be preserved, they must be downloaded and included in the * call to this method. For more information, see the <a href="https: * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setMetadata(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .setMetadata(metadata, accessConditions) .map(VoidResponse::new); } /** * Creates a read-only snapshot of a blob. * * @return * A reactive response containing the ID of the new snapshot. */ public Mono<Response<String>> createSnapshot() { return this.createSnapshot(null, null); } /** * Creates a read-only snapshot of a blob. * * @param metadata * {@link Metadata} * @param accessConditions * {@link BlobAccessConditions} * * @return * A reactive response containing the ID of the new snapshot. 
*/ public Mono<Response<String>> createSnapshot(Metadata metadata, BlobAccessConditions accessConditions) { return blobAsyncRawClient .createSnapshot(metadata, accessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().snapshot())); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier) { return this.setTier(tier, null); } /** * Sets the tier on a blob. The operation is allowed on a page blob in a premium storage account or a block blob in * a blob storage or GPV2 account. A premium page blob's tier determines the allowed size, IOPS, and bandwidth of * the blob. A block blob's tier determines the Hot/Cool/Archive storage type. This does not update the blob's etag. * * @param tier * The new tier for the blob. * @param leaseAccessConditions * By setting lease access conditions, requests will fail if the provided lease does not match the active * lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> setTier(AccessTier tier, LeaseAccessConditions leaseAccessConditions) { return blobAsyncRawClient .setTier(tier, leaseAccessConditions) .map(VoidResponse::new); } /** * Undelete restores the content and metadata of a soft-deleted blob and/or any associated soft-deleted snapshots. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> undelete() { return blobAsyncRawClient .undelete() .map(VoidResponse::new); } /** * Acquires a lease on the blob for write and delete operations. 
The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedId * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedId, int duration) { return this.acquireLease(proposedId, duration, null); } /** * Acquires a lease on the blob for write and delete operations. The lease duration must be between 15 to 60 * seconds, or infinite (-1). * * @param proposedID * A {@code String} in any valid GUID format. May be null. * @param duration * The duration of the lease, in seconds, or negative one (-1) for a lease that * never expires. A non-infinite lease can be between 15 and 60 seconds. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the lease ID. */ public Mono<Response<String>> acquireLease(String proposedID, int duration, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .acquireLease(proposedID, duration, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID) { return this.renewLease(leaseID, null); } /** * Renews the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. 
* @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the renewed lease ID. */ public Mono<Response<String>> renewLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .renewLease(leaseID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID) { return this.releaseLease(leaseID, null); } /** * Releases the blob's previously-acquired lease. * * @param leaseID * The leaseId of the active lease on the blob. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response signalling completion. */ public Mono<VoidResponse> releaseLease(String leaseID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .releaseLease(leaseID, modifiedAccessConditions) .map(VoidResponse::new); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @return * A reactive response containing the remaining time in the broken lease in seconds. 
*/ public Mono<Response<Integer>> breakLease() { return this.breakLease(null, null); } /** * BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) constant * to break a fixed-duration lease when it expires or an infinite lease immediately. * * @param breakPeriodInSeconds * An optional {@code Integer} representing the proposed duration of seconds that the lease should continue * before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the * time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be * available before the break period has expired, but the lease may be held for longer than the break * period. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return * A reactive response containing the remaining time in the broken lease in seconds. */ public Mono<Response<Integer>> breakLease(Integer breakPeriodInSeconds, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .breakLease(breakPeriodInSeconds, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseTime())); } /** * ChangeLease changes the blob's lease ID. * * @param leaseId * The leaseId of the active lease on the blob. * @param proposedID * A {@code String} in any valid GUID format. * * @return * A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID) { return this.changeLease(leaseId, proposedID, null); } /** * ChangeLease changes the blob's lease ID. For more information, see the <a href="https: * * @param leaseId * The leaseId of the active lease on the blob. 
* @param proposedID * A {@code String} in any valid GUID format. * @param modifiedAccessConditions * Standard HTTP Access conditions related to the modification of data. ETag and LastModifiedTime are used * to construct conditions related to when the blob was changed relative to the given request. The request * will fail if the specified condition is not satisfied. * * @return A reactive response containing the new lease ID. */ public Mono<Response<String>> changeLease(String leaseId, String proposedID, ModifiedAccessConditions modifiedAccessConditions) { return blobAsyncRawClient .changeLease(leaseId, proposedID, modifiedAccessConditions) .map(rb -> new SimpleResponse<>(rb, rb.deserializedHeaders().leaseId())); } /** * Returns the sku name and account kind for the account. For more information, please see the <a href="https: * * @return a reactor response containing the sku name and account kind. */ public Mono<Response<StorageAccountInfo>> getAccountInfo() { return blobAsyncRawClient .getAccountInfo() .map(rb -> new SimpleResponse<>(rb, new StorageAccountInfo(rb.deserializedHeaders()))); } }
> `MSICredentials()` — replaced our custom implementations with this API. Verified it works on a VM. #Resolved
/**
 * Asynchronously obtains a security token for the given audience using Azure Managed Identity.
 * The token is fetched on an internal worker thread and the returned future completes with the
 * resulting {@code SecurityToken}, or completes exceptionally if acquisition or parsing fails.
 *
 * @param audience the audience the token is issued for.
 * @return a {@code CompletableFuture} that completes with the acquired {@code SecurityToken}.
 */
public CompletableFuture<SecurityToken> getSecurityTokenAsync(String audience) {
    CompletableFuture<SecurityToken> tokenGeneratingFuture = new CompletableFuture<>();
    // Token acquisition performs blocking I/O, so it is offloaded to the shared messaging thread pool.
    MessagingFactory.INTERNAL_THREAD_POOL.execute(() -> {
        try {
            MSICredentials credentials = new MSICredentials();
            String rawToken = credentials.getToken(SecurityConstants.SERVICEBUS_AAD_AUDIENCE_RESOURCE_URL);
            // Expiry is read from the JWT's own claims rather than assuming a fixed lifetime.
            Date expiry = getExpirationDateTimeUtcFromToken(rawToken);
            tokenGeneratingFuture.complete(new SecurityToken(SecurityTokenType.JWT, audience, rawToken, Instant.now(), expiry.toInstant()));
        } catch (IOException e) {
            // The MSI endpoint could not be reached or returned an error.
            TRACE_LOGGER.error("ManagedIdentity token generation failed.", e);
            tokenGeneratingFuture.completeExceptionally(e);
        } catch (ParseException e) {
            // The returned token string was not a parseable JWT.
            TRACE_LOGGER.error("Could not parse the expiry time from the Managed Identity token string.", e);
            tokenGeneratingFuture.completeExceptionally(e);
        }
    });
    return tokenGeneratingFuture;
}
MSICredentials credentials = new MSICredentials();
/**
 * Asynchronously obtains a security token for the given audience via Azure Managed Identity.
 * The blocking acquisition runs on the shared messaging thread pool; the returned future
 * completes with the token, or exceptionally if acquisition or JWT parsing fails.
 *
 * @param audience the audience the token is issued for.
 * @return a {@code CompletableFuture} yielding the acquired {@code SecurityToken}.
 */
public CompletableFuture<SecurityToken> getSecurityTokenAsync(String audience) {
    CompletableFuture<SecurityToken> future = new CompletableFuture<>();
    Runnable fetchToken = () -> {
        try {
            MSICredentials msiCredentials = new MSICredentials();
            String token = msiCredentials.getToken(SecurityConstants.SERVICEBUS_AAD_AUDIENCE_RESOURCE_URL);
            // The expiration is taken from the JWT claims embedded in the token itself.
            Date expiration = getExpirationDateTimeUtcFromToken(token);
            SecurityToken securityToken =
                new SecurityToken(SecurityTokenType.JWT, audience, token, Instant.now(), expiration.toInstant());
            future.complete(securityToken);
        } catch (IOException ioException) {
            // The MSI endpoint could not be reached or returned an error.
            TRACE_LOGGER.error("ManagedIdentity token generation failed.", ioException);
            future.completeExceptionally(ioException);
        } catch (ParseException parseException) {
            // The returned token string was not a parseable JWT.
            TRACE_LOGGER.error("Could not parse the expiry time from the Managed Identity token string.", parseException);
            future.completeExceptionally(parseException);
        }
    };
    MessagingFactory.INTERNAL_THREAD_POOL.execute(fetchToken);
    return future;
}
/**
 * A {@code TokenProvider} that acquires tokens through Azure Managed Identity.
 * NOTE(review): other members of this class appear elided in this view; confirm against
 * the full source that no overriding method lost its annotation.
 */
class ManagedIdentityTokenProvider extends TokenProvider {
    private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ManagedIdentityTokenProvider.class);

    /**
     * Extracts the expiration time from a JWT token string by parsing its claims.
     * Fixed: removed the stray {@code @Override} annotation — a {@code static} method
     * cannot override anything, so the annotation is a compile error here. The helper is
     * also made {@code private} since it is an internal detail.
     *
     * @param token the raw JWT token string.
     * @return the expiration time carried in the token's {@code exp} claim.
     * @throws ParseException if the string is not a parseable JWT.
     */
    private static Date getExpirationDateTimeUtcFromToken(String token) throws ParseException {
        JWT jwt = JWTParser.parse(token);
        JWTClaimsSet claims = jwt.getJWTClaimsSet();
        return claims.getExpirationTime();
    }
}
/**
 * A {@code TokenProvider} that acquires tokens through Azure Managed Identity.
 * NOTE(review): other members of this class appear elided in this view; confirm against
 * the full source that no overriding method lost its annotation.
 */
class ManagedIdentityTokenProvider extends TokenProvider {
    private static final Logger TRACE_LOGGER = LoggerFactory.getLogger(ManagedIdentityTokenProvider.class);

    /**
     * Extracts the expiration time from a JWT token string by parsing its claims.
     * Fixed: removed the {@code @Override} annotation — a {@code private static} method
     * cannot override anything, so the annotation is a compile error.
     *
     * @param token the raw JWT token string.
     * @return the expiration time carried in the token's {@code exp} claim.
     * @throws ParseException if the string is not a parseable JWT.
     */
    private static Date getExpirationDateTimeUtcFromToken(String token) throws ParseException {
        JWT jwt = JWTParser.parse(token);
        JWTClaimsSet claims = jwt.getJWTClaimsSet();
        return claims.getExpirationTime();
    }
}
These three `append` calls can be collapsed into one — `"&"`, `"client_id"`, and `"="` are string constants that can be concatenated at compile time.
/**
 * Asynchronously acquires a token from the App Service Managed Identity endpoint.
 *
 * @param msiEndpoint the Managed Identity endpoint URL (from the MSI_ENDPOINT environment).
 * @param msiSecret the secret expected by the endpoint (from the MSI_SECRET environment).
 * @param clientId the client ID of a user-assigned identity, or null for the system-assigned identity.
 * @param scopes the scopes to authenticate to; converted to a single AAD resource string.
 * @return a Publisher that emits an AccessToken, or an error if the request fails.
 */
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) {
    String resource = ScopeUtil.scopesToResource(scopes);
    HttpURLConnection connection = null;
    try {
        StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource));
        if (clientId != null) {
            // Single append of the compile-time constant prefix plus the variable value
            // (was four separate appends of "&", "client_id", "=", clientId).
            urlStringBuilder.append("&client_id=").append(clientId);
        }
        URL url = new URL(urlStringBuilder.toString());
        connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("GET");
        // "Secret" and "Metadata" headers are required by the App Service MSI endpoint.
        connection.setRequestProperty("Secret", msiSecret);
        connection.setRequestProperty("Metadata", "true");
        connection.connect();
        // try-with-resources so the Scanner (and the underlying input stream) is always closed;
        // the original leaked it. "\\A" makes the scanner read the whole response in one token.
        try (Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name())) {
            s.useDelimiter("\\A");
            String result = s.hasNext() ? s.next() : "";
            return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
        }
    } catch (IOException e) {
        return Mono.error(e);
    } finally {
        if (connection != null) {
            connection.disconnect();
        }
    }
}
urlStringBuilder.append("=");
/**
 * Asynchronously acquires a token from the App Service Managed Identity endpoint.
 *
 * @param msiEndpoint the Managed Identity endpoint URL (from the MSI_ENDPOINT environment).
 * @param msiSecret the secret expected by the endpoint (from the MSI_SECRET environment).
 * @param clientId the client ID of a user-assigned identity, or null for the system-assigned identity.
 * @param scopes the scopes to authenticate to; converted to a single AAD resource string.
 * @return a Publisher that emits an AccessToken, or an error if the request fails.
 */
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) {
    String resource = ScopeUtil.scopesToResource(scopes);
    HttpURLConnection connection = null;
    StringBuilder payload = new StringBuilder();
    // Build the query string first, URL-encoding every value so reserved characters
    // in the resource or client ID cannot corrupt the request.
    try {
        payload.append("resource=");
        payload.append(URLEncoder.encode(resource, "UTF-8"));
        payload.append("&api-version=");
        payload.append(URLEncoder.encode("2017-09-01", "UTF-8"));
        if (clientId != null) {
            payload.append("&client_id=");
            payload.append(URLEncoder.encode(clientId, "UTF-8"));
        }
    } catch (IOException exception) {
        // URLEncoder.encode declares UnsupportedEncodingException (an IOException);
        // UTF-8 is always supported, so this path is effectively unreachable.
        return Mono.error(exception);
    }
    try {
        URL url = new URL(String.format("%s?%s", msiEndpoint, payload));
        connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("GET");
        // "Secret" and "Metadata" headers are required by the App Service MSI endpoint.
        connection.setRequestProperty("Secret", msiSecret);
        connection.setRequestProperty("Metadata", "true");
        connection.connect();
        // "\\A" makes the scanner consume the whole response body as a single token.
        Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A");
        String result = s.hasNext() ? s.next() : "";
        return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON);
    } catch (IOException e) {
        return Mono.error(e);
    } finally {
        // Always release the connection, whether deserialization succeeded or not.
        if (connection != null) {
            connection.disconnect();
        }
    }
}
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version"); payload.append("="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&"); payload.append("resource"); payload.append("="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&"); payload.append("client_id"); payload.append("="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
It's odd to mix String.format with a StringBuilder; you should use only one of the two (I'd suggest the StringBuilder).
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; try { StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource)); if (clientId != null) { urlStringBuilder.append("&"); urlStringBuilder.append("client_id"); urlStringBuilder.append("="); urlStringBuilder.append(clientId); } URL url = new URL(urlStringBuilder.toString()); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } }
StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource));
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version"); payload.append("="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&"); payload.append("resource"); payload.append("="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&"); payload.append("client_id"); payload.append("="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
The same issue appears here in code that isn't changed — excessive StringBuilder append calls that could be consolidated into fewer, simpler operations.
public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version"); payload.append("="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&"); payload.append("resource"); payload.append("="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&"); payload.append("client_id"); payload.append("="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); }
payload.append(URLEncoder.encode(clientId, "UTF-8"));
public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? 
imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; try { StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource)); if (clientId != null) { urlStringBuilder.append("&"); urlStringBuilder.append("client_id"); urlStringBuilder.append("="); urlStringBuilder.append(clientId); } URL url = new URL(urlStringBuilder.toString()); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } } /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
`String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource)` — I'm not sure whether URL handles this, but the resource value most likely needs to be URL-encoded before being placed in the query string.
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; try { StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource)); if (clientId != null) { urlStringBuilder.append("&"); urlStringBuilder.append("client_id"); urlStringBuilder.append("="); urlStringBuilder.append(clientId); } URL url = new URL(urlStringBuilder.toString()); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } }
StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource));
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version"); payload.append("="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&"); payload.append("resource"); payload.append("="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&"); payload.append("client_id"); payload.append("="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
Updated. StringBuilder not needed here anymore
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; try { StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource)); if (clientId != null) { urlStringBuilder.append("&"); urlStringBuilder.append("client_id"); urlStringBuilder.append("="); urlStringBuilder.append(clientId); } URL url = new URL(urlStringBuilder.toString()); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } }
StringBuilder urlStringBuilder = new StringBuilder(String.format("%s?resource=%s&api-version=2017-09-01", msiEndpoint, resource));
public Mono<AccessToken> authenticateToManagedIdentityEnpoint(String msiEndpoint, String msiSecret, String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); HttpURLConnection connection = null; StringBuilder payload = new StringBuilder(); try { payload.append("resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); payload.append("&api-version="); payload.append(URLEncoder.encode("2017-09-01", "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } try { URL url = new URL(String.format("%s?%s", msiEndpoint, payload)); connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Secret", msiSecret); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? s.next() : ""; return adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON); } catch (IOException e) { return Mono.error(e); } finally { if (connection != null) { connection.disconnect(); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version"); payload.append("="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&"); payload.append("resource"); payload.append("="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&"); payload.append("client_id"); payload.append("="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
class IdentityClient { private final IdentityClientOptions options; private final SerializerAdapter adapter = JacksonAdapter.createDefaultSerializerAdapter(); private static final Random RANDOM = new Random(); /** * Creates an IdentityClient with default options. */ public IdentityClient() { this.options = new IdentityClientOptions(); } /** * Creates an IdentityClient with the given options. * @param options the options configuring the client. */ public IdentityClient(IdentityClientOptions options) { this.options = options; } /** * Asynchronously acquire a token from Active Directory with a client secret. * @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param clientSecret the client secret of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithClientSecret(String tenantId, String clientId, String clientSecret, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { context.acquireToken( resource, new ClientCredential(clientId, clientSecret), Adal4jUtil.authenticationDelegate(callback)); }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PKCS12 certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pfxCertificatePath the path to the PKCS12 certificate of the application * @param pfxCertificatePassword the password protecting the PFX certificate * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPfxCertificate(String tenantId, String clientId, String pfxCertificatePath, String pfxCertificatePassword, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, Adal4jUtil.createAsymmetricKeyCredential(clientId, Files.readAllBytes(Paths.get(pfxCertificatePath)), pfxCertificatePassword), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } /** * Asynchronously acquire a token from Active Directory with a PEM certificate. 
* @param tenantId the tenant ID of the application * @param clientId the client ID of the application * @param pemCertificatePath the path to the PEM certificate of the application * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateWithPemCertificate(String tenantId, String clientId, String pemCertificatePath, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); String authorityUrl = options.authorityHost().replaceAll("/+$", "") + "/" + tenantId; ExecutorService executor = Executors.newSingleThreadExecutor(); AuthenticationContext context = createAuthenticationContext(executor, authorityUrl, options.proxyOptions()); return Mono.create((Consumer<MonoSink<AuthenticationResult>>) callback -> { try { context.acquireToken( resource, AsymmetricKeyCredential.create(clientId, Adal4jUtil.privateKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath))), Adal4jUtil.publicKeyFromPem(Files.readAllBytes(Paths.get(pemCertificatePath)))), Adal4jUtil.authenticationDelegate(callback)); } catch (IOException e) { callback.error(e); } }).map(ar -> new AccessToken(ar.getAccessToken(), OffsetDateTime.ofInstant(ar.getExpiresOnDate().toInstant(), ZoneOffset.UTC))) .doFinally(s -> executor.shutdown()); } private static AuthenticationContext createAuthenticationContext(ExecutorService executor, String authorityUrl, ProxyOptions proxyOptions) { AuthenticationContext context; try { context = new AuthenticationContext(authorityUrl, false, executor); } catch (MalformedURLException mue) { throw Exceptions.propagate(mue); } if (proxyOptions != null) { context.setProxy(new Proxy(proxyOptions.type() == Type.HTTP ? Proxy.Type.HTTP : Proxy.Type.SOCKS, proxyOptions.address())); } return context; } /** * Asynchronously acquire a token from the App Service Managed Service Identity endpoint. 
* @param msiEndpoint the endpoint to acquire token from * @param msiSecret the secret to acquire token with * @param clientId the client ID of the application service * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ /** * Asynchronously acquire a token from the Virtual Machine IMDS endpoint. * @param clientId the client ID of the virtual machine * @param scopes the scopes to authenticate to * @return a Publisher that emits an AccessToken */ public Mono<AccessToken> authenticateToIMDSEndpoint(String clientId, String[] scopes) { String resource = ScopeUtil.scopesToResource(scopes); StringBuilder payload = new StringBuilder(); final int imdsUpgradeTimeInMs = 70 * 1000; try { payload.append("api-version="); payload.append(URLEncoder.encode("2018-02-01", "UTF-8")); payload.append("&resource="); payload.append(URLEncoder.encode(resource, "UTF-8")); if (clientId != null) { payload.append("&client_id="); payload.append(URLEncoder.encode(clientId, "UTF-8")); } } catch (IOException exception) { return Mono.error(exception); } int retry = 1; while (retry <= options.maxRetry()) { URL url = null; HttpURLConnection connection = null; try { url = new URL(String.format("http: connection = (HttpURLConnection) url.openConnection(); connection.setRequestMethod("GET"); connection.setRequestProperty("Metadata", "true"); connection.connect(); Scanner s = new Scanner(connection.getInputStream(), StandardCharsets.UTF_8.name()).useDelimiter("\\A"); String result = s.hasNext() ? 
s.next() : ""; return Mono.just(adapter.deserialize(result, MSIToken.class, SerializerEncoding.JSON)); } catch (IOException exception) { if (connection == null) { return Mono.error(new RuntimeException(String.format("Could not connect to the url: %s.", url), exception)); } int responseCode = 0; try { responseCode = connection.getResponseCode(); } catch (IOException e) { return Mono.error(e); } if (responseCode == 410 || responseCode == 429 || responseCode == 404 || (responseCode >= 500 && responseCode <= 599)) { int retryTimeoutInMs = options.retryTimeout().apply(RANDOM.nextInt(retry)); retryTimeoutInMs = (responseCode == 410 && retryTimeoutInMs < imdsUpgradeTimeInMs) ? imdsUpgradeTimeInMs : retryTimeoutInMs; retry++; if (retry > options.maxRetry()) { break; } else { sleep(retryTimeoutInMs); } } else { return Mono.error(new RuntimeException("Couldn't acquire access token from IMDS, verify your objectId, clientId or msiResourceId", exception)); } } finally { if (connection != null) { connection.disconnect(); } } } return Mono.error(new RuntimeException(String.format("MSI: Failed to acquire tokens after retrying %s times", options.maxRetry()))); } private static void sleep(int millis) { try { Thread.sleep(millis); } catch (InterruptedException ex) { throw new RuntimeException(ex); } } }
Your string replace has done too much, hasn't it? This is still a secrets sample?
public static void main(String[] args) throws IllegalArgumentException { SecretClient client = SecretClient.builder() .endpoint("https: .credential(new AzureCredential()) .build(); client.setSecret(new Secret("StorageAccountPassword", "f4G34fMh8v-fdsgjsk2323=-asdsdfsdf") .expires(OffsetDateTime.now().plusYears(1))); client.setSecret(new Secret("BankAccountPassword", "f4G34fMh8v") .expires(OffsetDateTime.now().plusYears(1))); for (SecretBase secret : client.listSecrets()) { Secret secretWithValue = client.getSecret(secret).value(); System.out.printf("Received secret with name %s and value %s \n", secretWithValue.name(), secretWithValue.value()); } client.setSecret("BankAccountPassword", "sskdjfsdasdjsd"); for (SecretBase secret : client.listSecretVersions("BankAccountPassword")) { Secret secretWithValue = client.getSecret(secret).value(); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } }
public static void main(String[] args) throws IllegalArgumentException { SecretClient client = SecretClient.builder() .endpoint("https: .credential(new DefaultAzureCredential()) .build(); client.setSecret(new Secret("StorageAccountPassword", "f4G34fMh8v-fdsgjsk2323=-asdsdfsdf") .expires(OffsetDateTime.now().plusYears(1))); client.setSecret(new Secret("BankAccountPassword", "f4G34fMh8v") .expires(OffsetDateTime.now().plusYears(1))); for (SecretBase secret : client.listSecrets()) { Secret secretWithValue = client.getSecret(secret).value(); System.out.printf("Received secret with name %s and value %s \n", secretWithValue.name(), secretWithValue.value()); } client.setSecret("BankAccountPassword", "sskdjfsdasdjsd"); for (SecretBase secret : client.listSecretVersions("BankAccountPassword")) { Secret secretWithValue = client.getSecret(secret).value(); System.out.printf("Received secret's version with name %s and value %s", secretWithValue.name(), secretWithValue.value()); } }
class ListOperations { /** * Authenticates with the key vault and shows how to list keys and list versions of a specific secret in the key vault. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. */ }
class ListOperations { /** * Authenticates with the key vault and shows how to list secrets and list versions of a specific secret in the key vault. * * @param args Unused. Arguments to the program. * @throws IllegalArgumentException when invalid key vault endpoint is passed. */ }
Just use `System.out.printf`, rather than String.format inside a println
public static void main(String[] args) throws InterruptedException { Semaphore semaphore = new Semaphore(1); String connectionString = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}"; EventHubClient client = new EventHubClientBuilder() .connectionString(connectionString) .build(); semaphore.acquire(); client.getPartitionIds().flatMap(partitionId -> client.getPartitionProperties(partitionId)) .subscribe(properties -> { System.out.println("The Event Hub has the following properties:"); System.out.println(String.format( "Event Hub Name: %s; Partition Id: %s; Is partition empty? %s; First Sequence Number: %s; " + "Last Enqueued Time: %s; Last Enqueued Sequence Number: %s; Last Enqueued Offset: %s", properties.eventHubPath(), properties.id(), properties.isEmpty(), properties.beginningSequenceNumber(), properties.lastEnqueuedTime(), properties.lastEnqueuedSequenceNumber(), properties.lastEnqueuedOffset())); }, error -> { System.err.println("Error occurred while fetching partition properties: " + error.toString()); }, () -> { semaphore.release(); }); System.out.println("Waiting for partition properties to complete..."); semaphore.acquire(); System.out.println("Finished."); }
System.out.println(String.format(
public static void main(String[] args) throws InterruptedException { Semaphore semaphore = new Semaphore(1); String connectionString = "Endpoint={endpoint};SharedAccessKeyName={sharedAccessKeyName};SharedAccessKey={sharedAccessKey};EntityPath={eventHubPath}"; EventHubClient client = new EventHubClientBuilder() .connectionString(connectionString) .buildAsyncClient(); semaphore.acquire(); client.getPartitionIds().flatMap(partitionId -> client.getPartitionProperties(partitionId)) .subscribe(properties -> { System.out.println("The Event Hub has the following properties:"); System.out.printf( "Event Hub Name: %s; Partition Id: %s; Is partition empty? %s; First Sequence Number: %s; " + "Last Enqueued Time: %s; Last Enqueued Sequence Number: %s; Last Enqueued Offset: %s \n", properties.eventHubPath(), properties.id(), properties.isEmpty(), properties.beginningSequenceNumber(), properties.lastEnqueuedTime(), properties.lastEnqueuedSequenceNumber(), properties.lastEnqueuedOffset()); }, error -> { System.err.println("Error occurred while fetching partition properties: " + error.toString()); }, () -> { semaphore.release(); }); System.out.println("Waiting for partition properties to complete..."); semaphore.acquire(); System.out.println("Finished."); }
class GetEventHubMetadata { /** * Demonstrates how to get metadata from an Event Hub's partitions. * * @param args Unused arguments to the sample. * @throws InterruptedException if the semaphore could not be acquired. */ }
class GetEventHubMetadata { /** * Demonstrates how to get metadata from an Event Hub's partitions. * * @param args Unused arguments to the sample. * @throws InterruptedException if the semaphore could not be acquired. */ }
Do a static import of OperationStatus so you don't need to fully-qualify the `PollResponse.` part.
/**
 * Code snippet: creates a {@link PollResponse} whose status is a custom (non-predefined) value.
 *
 * @param otherStatus v
 * @param value v
 */
public void initialise(String otherStatus, T value) {
    // fromString allows statuses beyond the predefined OperationStatus constants.
    PollResponse<String> inProgressPollResponse
        = new PollResponse<>(PollResponse.OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response");
}
= new PollResponse<>(PollResponse.OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response");
/**
 * Code snippet: creates a {@link PollResponse} whose status is a custom (non-predefined) value.
 * Assumes {@code OperationStatus} is statically imported in this file — TODO confirm.
 *
 * @param otherStatus v
 * @param value v
 */
public void initialise(String otherStatus, T value) {
    // fromString allows statuses beyond the predefined OperationStatus constants.
    PollResponse<String> inProgressPollResponse
        = new PollResponse<>(OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response");
}
class PollResponseJavaDocCodeSnippets<T> { /** * * @param otherStatus v * @param value v */ /** * * @param otherStatus v * @param value v * @param retryAfterDuration v */ public void initialise(String otherStatus, T value, Duration retryAfterDuration) { PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response", Duration.ofMillis(5000)); } /** * initialise * @param status v * @param value v */ public void initialise(PollResponse.OperationStatus status, T value) { PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.IN_PROGRESS, "my custom response"); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v * @param properties v */ public void initialise(PollResponse.OperationStatus status, T value, Duration retryAfter, Map<Object, Object> properties) { Map<Object, Object> prop = new HashMap<>(); prop.put("service.url", "http: prop.put("customer.id", 2635342); PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.IN_PROGRESS, "mycustom response", Duration.ofMillis(2000), prop); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v */ public void initialise(PollResponse.OperationStatus status, T value, Duration retryAfter) { PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.IN_PROGRESS, "my custom response", Duration.ofMillis(2000)); } }
class PollResponseJavaDocCodeSnippets<T> { /** * * @param otherStatus v * @param value v */ /** * * @param otherStatus v * @param value v * @param retryAfterDuration v */ public void initialise(String otherStatus, T value, Duration retryAfterDuration) { PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response", Duration.ofMillis(5000)); } /** * initialise * @param status v * @param value v */ public void initialise(OperationStatus status, T value) { PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.IN_PROGRESS, "my custom response"); PollResponse<String> pollResponseWithCustomStatus = new PollResponse<>(OperationStatus.fromString("OTHER_CUSTOM_STATUS"), "my custom status response"); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v * @param properties v */ public void initialise(OperationStatus status, T value, Duration retryAfter, Map<Object, Object> properties) { Map<Object, Object> prop = new HashMap<>(); prop.put("service.url", "http: prop.put("customer.id", 2635342); PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.IN_PROGRESS, "mycustom response", Duration.ofMillis(2000), prop); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v */ public void initialise(OperationStatus status, T value, Duration retryAfter) { PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.IN_PROGRESS, "my custom response", Duration.ofMillis(2000)); } }
Fix up these as well so you don't specify `PollResponse.`
/**
 * Code snippet: creates a {@link PollResponse} with a custom status and a retry-after duration.
 *
 * @param otherStatus v
 * @param value v
 * @param retryAfterDuration v
 */
public void initialise(String otherStatus, T value, Duration retryAfterDuration) {
    // The 5000 ms duration tells the poller how long to wait before its next poll.
    PollResponse<String> inProgressPollResponse
        = new PollResponse<>(PollResponse.OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response",
            Duration.ofMillis(5000));
}
= new PollResponse<>(PollResponse.OperationStatus.fromString("CUSTOM_OTHER_STATUS"),
/**
 * Code snippet: creates a {@link PollResponse} with a custom status and a retry-after duration.
 * Assumes {@code OperationStatus} is statically imported in this file — TODO confirm.
 *
 * @param otherStatus v
 * @param value v
 * @param retryAfterDuration v
 */
public void initialise(String otherStatus, T value, Duration retryAfterDuration) {
    // The 5000 ms duration tells the poller how long to wait before its next poll.
    PollResponse<String> inProgressPollResponse
        = new PollResponse<>(OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response",
            Duration.ofMillis(5000));
}
class PollResponseJavaDocCodeSnippets<T> { /** * * @param otherStatus v * @param value v */ public void initialise(String otherStatus, T value) { PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response"); } /** * * @param otherStatus v * @param value v * @param retryAfterDuration v */ /** * initialise * @param status v * @param value v */ public void initialise(PollResponse.OperationStatus status, T value) { PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.IN_PROGRESS, "my custom response"); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v * @param properties v */ public void initialise(PollResponse.OperationStatus status, T value, Duration retryAfter, Map<Object, Object> properties) { Map<Object, Object> prop = new HashMap<>(); prop.put("service.url", "http: prop.put("customer.id", 2635342); PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.IN_PROGRESS, "mycustom response", Duration.ofMillis(2000), prop); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v */ public void initialise(PollResponse.OperationStatus status, T value, Duration retryAfter) { PollResponse<String> inProgressPollResponse = new PollResponse<>(PollResponse.OperationStatus.IN_PROGRESS, "my custom response", Duration.ofMillis(2000)); } }
class PollResponseJavaDocCodeSnippets<T> { /** * * @param otherStatus v * @param value v */ public void initialise(String otherStatus, T value) { PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.fromString("CUSTOM_OTHER_STATUS"), "my custom response"); } /** * * @param otherStatus v * @param value v * @param retryAfterDuration v */ /** * initialise * @param status v * @param value v */ public void initialise(OperationStatus status, T value) { PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.IN_PROGRESS, "my custom response"); PollResponse<String> pollResponseWithCustomStatus = new PollResponse<>(OperationStatus.fromString("OTHER_CUSTOM_STATUS"), "my custom status response"); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v * @param properties v */ public void initialise(OperationStatus status, T value, Duration retryAfter, Map<Object, Object> properties) { Map<Object, Object> prop = new HashMap<>(); prop.put("service.url", "http: prop.put("customer.id", 2635342); PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.IN_PROGRESS, "mycustom response", Duration.ofMillis(2000), prop); } /** * Initialise and subscribe snippet * @param status v * @param value v * @param retryAfter v */ public void initialise(OperationStatus status, T value, Duration retryAfter) { PollResponse<String> inProgressPollResponse = new PollResponse<>(OperationStatus.IN_PROGRESS, "my custom response", Duration.ofMillis(2000)); } }
We really need to rename `maxResults`. It's not intuitive that it limits the number of items per page of the response. Not necessary for this review; I'm tracking that discussion elsewhere.
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient). * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerbasic" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a client that references a to-be-created blob in your Azure Storage account's container. * This returns a BlockBlobClient object that wraps the blob's endpoint, credential and a request pipeline * (inherited from containerClient). Note that blob names can be mixed case. */ BlockBlobClient blobClient = containerClient.getBlockBlobClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes()); /** * Create the blob with string (plain text) content. 
*/ blobClient.upload(dataStream, data.length()); dataStream.close(); /** * Download the blob's content to output stream. */ OutputStream outputStream = new ByteArrayOutputStream(data.length()); blobClient.download(outputStream); outputStream.close(); /** * Verify that the blob data round-tripped correctly. */ if (!data.equals(outputStream.toString())) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /** * Create more blobs before listing. */ for(int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInBlobs = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); containerClient.getBlockBlobClient("myblobsforlisting" + System.currentTimeMillis()) .upload(dataInBlobs, sampleData.length()); dataInBlobs.close(); } /** * List the blob(s) in our container. */ containerClient.listBlobsFlat(new ListBlobsOptions().maxResults(1), null) .forEach(blobItem -> { System.out.println("Blob name: " + blobItem.name() + ", Snapshot: " + blobItem.snapshot()); });; /** * Delete the blob we created earlier. */ blobClient.delete(); /** * Delete the container we created earlier. */ containerClient.delete(); }
containerClient.listBlobsFlat(new ListBlobsOptions().maxResults(1), null)
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient). * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerbasic" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a client that references a to-be-created blob in your Azure Storage account's container. * This returns a BlockBlobClient object that wraps the blob's endpoint, credential and a request pipeline * (inherited from containerClient). Note that blob names can be mixed case. */ BlockBlobClient blobClient = containerClient.getBlockBlobClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes()); /** * Create the blob with string (plain text) content. 
*/ blobClient.upload(dataStream, data.length()); dataStream.close(); /** * Download the blob's content to output stream. */ int dataSize = (int) blobClient.getProperties().value().blobSize(); OutputStream outputStream = new ByteArrayOutputStream(dataSize); blobClient.download(outputStream); outputStream.close(); /** * Verify that the blob data round-tripped correctly. */ if (!data.equals(outputStream.toString())) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /** * Create more blobs before listing. */ for(int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInBlobs = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); containerClient.getBlockBlobClient("myblobsforlisting" + System.currentTimeMillis()) .upload(dataInBlobs, sampleData.length()); dataInBlobs.close(); } /** * List the blob(s) in our container. */ containerClient.listBlobsFlat() .forEach(blobItem -> { System.out.println("Blob name: " + blobItem.name() + ", Snapshot: " + blobItem.snapshot()); });; /** * Delete the blob we created earlier. */ blobClient.delete(); /** * Delete the container we created earlier. */ containerClient.delete(); }
/**
 * Sample demonstrating basic blob operations; account credentials are read from configuration.
 */
class BasicExample {
    // Reads the storage account name from the PRIMARY_STORAGE_ACCOUNT_NAME configuration setting.
    private static String getAccountName() {
        return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME");
    }

    // Reads the storage account key from the PRIMARY_STORAGE_ACCOUNT_KEY configuration setting.
    private static String getAccountKey() {
        return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY");
    }
}
class BasicExample { }
I'm not convinced we need this extra complexity. Could we just call `logger.error("", runtimeException)` without changing level or modifying any of the other code?
/**
 * Logs the given exception at error level and then rethrows it.
 * <p>
 * Unlike the previous implementation, this does not temporarily mutate the shared {@code level}
 * field to force logging — that read-modify-restore was not thread-safe. Instead the error level
 * is checked directly and SLF4J's {@code error(String, Throwable)} overload is used.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    if (canLogAtLevel(ERROR_LEVEL)) {
        // Use the exception's own message so the log entry is informative.
        logger.error(runtimeException.getMessage(), runtimeException);
    }
    throw runtimeException;
}
level = currentLevel;
/**
 * Logs the supplied exception at error level (when enabled) and rethrows it.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException != null) {
        if (canLogAtLevel(ERROR_LEVEL)) {
            logger.error(runtimeException.getMessage(), runtimeException);
        }
        throw runtimeException;
    }
}
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
It is a good suggestion to use `logger.error` directly.
/**
 * Logs the given exception and rethrows it.
 * <p>
 * NOTE(review): this temporarily mutates the shared {@code level} field to force error logging
 * while logging is disabled, then restores it. That read-modify-restore is not thread-safe —
 * confirm whether calling {@code logger.error} directly would be sufficient.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    // Remember the configured level so it can be restored after logging.
    int currentLevel = level;
    if (level == DISABLED_LEVEL) {
        level = ERROR_LEVEL;
    }
    if (canLogAtLevel(level)) {
        performLogging(runtimeException.getClass().getName(), false, runtimeException);
    }
    level = currentLevel;
    throw runtimeException;
}
level = currentLevel;
/**
 * Logs the given exception at error level and rethrows it.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    // Only log when error-level logging is enabled; the exception is rethrown either way.
    if (canLogAtLevel(ERROR_LEVEL)) {
        logger.error(runtimeException.getMessage(), runtimeException);
    }
    throw runtimeException;
}
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
Can we use `runtimeException.getMessage()` instead of empty string?
/**
 * Logs the given exception at error level and then rethrows it.
 * <p>
 * The check uses {@code ERROR_LEVEL} explicitly (rather than the configured {@code level} field,
 * which did not always behave as expected — see PR #4194), and logs the exception's own message
 * instead of an empty string.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    if (canLogAtLevel(ERROR_LEVEL)) {
        logger.error(runtimeException.getMessage(), runtimeException);
    }
    throw runtimeException;
}
logger.error("", runtimeException);
public void logAndThrow(RuntimeException runtimeException) { if (runtimeException == null) { return; } if (canLogAtLevel(ERROR_LEVEL)) { logger.error(runtimeException.getMessage(), runtimeException); } throw runtimeException; }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
This check won't always work as expected and won't comply with the changes made in PR #4194.
/**
 * Logs the given exception at error level and then rethrows it.
 * <p>
 * Checks {@code ERROR_LEVEL} directly instead of the configured {@code level} field, whose use
 * here did not comply with the level handling introduced in PR #4194, and passes the exception's
 * message rather than an empty string so the log line is informative.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    if (canLogAtLevel(ERROR_LEVEL)) {
        logger.error(runtimeException.getMessage(), runtimeException);
    }
    throw runtimeException;
}
if (canLogAtLevel(level)) {
public void logAndThrow(RuntimeException runtimeException) { if (runtimeException == null) { return; } if (canLogAtLevel(ERROR_LEVEL)) { logger.error(runtimeException.getMessage(), runtimeException); } throw runtimeException; }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
This is needed as the exception will be redacted from logging if the logging level doesn't include debugging/verbose logs.
/**
 * Logs the given exception and rethrows it.
 * <p>
 * NOTE(review): the guard uses the configured {@code level} field and logs with an empty
 * message — confirm whether {@code ERROR_LEVEL} and {@code runtimeException.getMessage()}
 * should be used instead (see PR #4194).
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    if (canLogAtLevel(level)) {
        logger.error("", runtimeException);
    }
    throw runtimeException;
}
logger.error("", runtimeException);
public void logAndThrow(RuntimeException runtimeException) { if (runtimeException == null) { return; } if (canLogAtLevel(ERROR_LEVEL)) { logger.error(runtimeException.getMessage(), runtimeException); } throw runtimeException; }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
@hemanttanwar Did you reconcile what this comment means? It would be good to see a follow-up comment to clarify we're on the same page.
/**
 * Logs the given exception and rethrows it.
 * <p>
 * NOTE(review): logging at error level via {@code logger.error} keeps the exception visible even
 * when the configured level would otherwise redact stack traces, but the {@code canLogAtLevel(level)}
 * guard and the empty message both look suspect — verify against PR #4194.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    if (canLogAtLevel(level)) {
        logger.error("", runtimeException);
    }
    throw runtimeException;
}
if (canLogAtLevel(level)) {
public void logAndThrow(RuntimeException runtimeException) { if (runtimeException == null) { return; } if (canLogAtLevel(ERROR_LEVEL)) { logger.error(runtimeException.getMessage(), runtimeException); } throw runtimeException; }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
Spoke to @alzimmermsft and he is fine with the changes done to handle this case.
/**
 * Logs the given exception and rethrows it.
 * <p>
 * NOTE(review): this variant guards on the configured {@code level} field and logs an empty
 * message; later revisions switch to {@code canLogAtLevel(ERROR_LEVEL)} and
 * {@code runtimeException.getMessage()} — confirm which behavior is intended.
 *
 * @param runtimeException The exception to log and throw; the method is a no-op when {@code null}.
 */
public void logAndThrow(RuntimeException runtimeException) {
    if (runtimeException == null) {
        return;
    }
    if (canLogAtLevel(level)) {
        logger.error("", runtimeException);
    }
    throw runtimeException;
}
if (canLogAtLevel(level)) {
public void logAndThrow(RuntimeException runtimeException) { if (runtimeException == null) { return; } if (canLogAtLevel(ERROR_LEVEL)) { logger.error(runtimeException.getMessage(), runtimeException); } throw runtimeException; }
class name using the {@link LoggerFactory}
class name using the {@link LoggerFactory}
Would it make more sense to add an intermediate sample of getting the blob properties and retrieving the blobSize from BlobProperties to set the initial stream size? I would think that would be the more common way to determine this.
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient). * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerbasic" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a client that references a to-be-created blob in your Azure Storage account's container. * This returns a BlockBlobClient object that wraps the blob's endpoint, credential and a request pipeline * (inherited from containerClient). Note that blob names can be mixed case. */ BlockBlobClient blobClient = containerClient.getBlockBlobClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes()); /** * Create the blob with string (plain text) content. 
*/ blobClient.upload(dataStream, data.length()); dataStream.close(); /** * Download the blob's content to output stream. */ OutputStream outputStream = new ByteArrayOutputStream(data.length()); blobClient.download(outputStream); outputStream.close(); /** * Verify that the blob data round-tripped correctly. */ if (!data.equals(outputStream.toString())) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /** * Create more blobs before listing. */ for(int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInBlobs = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); containerClient.getBlockBlobClient("myblobsforlisting" + System.currentTimeMillis()) .upload(dataInBlobs, sampleData.length()); dataInBlobs.close(); } /** * List the blob(s) in our container. */ containerClient.listBlobsFlat(new ListBlobsOptions().maxResults(1), null) .forEach(blobItem -> { System.out.println("Blob name: " + blobItem.name() + ", Snapshot: " + blobItem.snapshot()); });; /** * Delete the blob we created earlier. */ blobClient.delete(); /** * Delete the container we created earlier. */ containerClient.delete(); }
OutputStream outputStream = new ByteArrayOutputStream(data.length());
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient). * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerbasic" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a client that references a to-be-created blob in your Azure Storage account's container. * This returns a BlockBlobClient object that wraps the blob's endpoint, credential and a request pipeline * (inherited from containerClient). Note that blob names can be mixed case. */ BlockBlobClient blobClient = containerClient.getBlockBlobClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes()); /** * Create the blob with string (plain text) content. 
*/ blobClient.upload(dataStream, data.length()); dataStream.close(); /** * Download the blob's content to output stream. */ int dataSize = (int) blobClient.getProperties().value().blobSize(); OutputStream outputStream = new ByteArrayOutputStream(dataSize); blobClient.download(outputStream); outputStream.close(); /** * Verify that the blob data round-tripped correctly. */ if (!data.equals(outputStream.toString())) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /** * Create more blobs before listing. */ for(int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInBlobs = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); containerClient.getBlockBlobClient("myblobsforlisting" + System.currentTimeMillis()) .upload(dataInBlobs, sampleData.length()); dataInBlobs.close(); } /** * List the blob(s) in our container. */ containerClient.listBlobsFlat() .forEach(blobItem -> { System.out.println("Blob name: " + blobItem.name() + ", Snapshot: " + blobItem.snapshot()); });; /** * Delete the blob we created earlier. */ blobClient.delete(); /** * Delete the container we created earlier. */ containerClient.delete(); }
class BasicExample { private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class BasicExample { }
Using `maxResults` here is a bit misleading; that field determines the number of results a single page will return, not the total. I think it should be removed.
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient). * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerbasic" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a client that references a to-be-created blob in your Azure Storage account's container. * This returns a BlockBlobClient object that wraps the blob's endpoint, credential and a request pipeline * (inherited from containerClient). Note that blob names can be mixed case. */ BlockBlobClient blobClient = containerClient.getBlockBlobClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes()); /** * Create the blob with string (plain text) content. 
*/ blobClient.upload(dataStream, data.length()); dataStream.close(); /** * Download the blob's content to output stream. */ OutputStream outputStream = new ByteArrayOutputStream(data.length()); blobClient.download(outputStream); outputStream.close(); /** * Verify that the blob data round-tripped correctly. */ if (!data.equals(outputStream.toString())) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /** * Create more blobs before listing. */ for(int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInBlobs = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); containerClient.getBlockBlobClient("myblobsforlisting" + System.currentTimeMillis()) .upload(dataInBlobs, sampleData.length()); dataInBlobs.close(); } /** * List the blob(s) in our container. */ containerClient.listBlobsFlat(new ListBlobsOptions().maxResults(1), null) .forEach(blobItem -> { System.out.println("Blob name: " + blobItem.name() + ", Snapshot: " + blobItem.snapshot()); });; /** * Delete the blob we created earlier. */ blobClient.delete(); /** * Delete the container we created earlier. */ containerClient.delete(); }
containerClient.listBlobsFlat(new ListBlobsOptions().maxResults(1), null)
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient object that wraps the container's endpoint, credential and a request pipeline (inherited from storageClient). * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerbasic" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a client that references a to-be-created blob in your Azure Storage account's container. * This returns a BlockBlobClient object that wraps the blob's endpoint, credential and a request pipeline * (inherited from containerClient). Note that blob names can be mixed case. */ BlockBlobClient blobClient = containerClient.getBlockBlobClient("HelloWorld.txt"); String data = "Hello world!"; InputStream dataStream = new ByteArrayInputStream(data.getBytes()); /** * Create the blob with string (plain text) content. 
*/ blobClient.upload(dataStream, data.length()); dataStream.close(); /** * Download the blob's content to output stream. */ int dataSize = (int) blobClient.getProperties().value().blobSize(); OutputStream outputStream = new ByteArrayOutputStream(dataSize); blobClient.download(outputStream); outputStream.close(); /** * Verify that the blob data round-tripped correctly. */ if (!data.equals(outputStream.toString())) { throw new RuntimeException("The downloaded data does not match the uploaded data."); } /** * Create more blobs before listing. */ for(int i = 0; i < 3; i++) { String sampleData = "Samples"; InputStream dataInBlobs = new ByteArrayInputStream(sampleData.getBytes(Charset.defaultCharset())); containerClient.getBlockBlobClient("myblobsforlisting" + System.currentTimeMillis()) .upload(dataInBlobs, sampleData.length()); dataInBlobs.close(); } /** * List the blob(s) in our container. */ containerClient.listBlobsFlat() .forEach(blobItem -> { System.out.println("Blob name: " + blobItem.name() + ", Snapshot: " + blobItem.snapshot()); });; /** * Delete the blob we created earlier. */ blobClient.delete(); /** * Delete the container we created earlier. */ containerClient.delete(); }
class BasicExample { private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class BasicExample { }
Since these are examples we should just use the long primitive instead of the Long object. When our API uses the Long object we should use Long.
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endPoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. * Now you can use the storageClient to perform various container and blob operations. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endPoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient uses the same endpoint, credential and pipeline from storageClient. * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerparallelupload" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a BlockBlobClient object that wraps a blob's endpoint and a default pipeline, the blockBlobClient give us access to upload the file. */ String filename = "BigFile.bin"; BlockBlobClient blobClient = containerClient.getBlockBlobClient(filename); /** * Create the empty uploadFile and downloadFile. */ File largeFile = createTempEmptyFile(filename); File downloadFile = createTempEmptyFile("downloadFile.bin"); /** * Generate random things to uploadFile, which makes the file with size of 100MB. 
*/ Long fileSize = 100 * 1024 * 1024L; createTempFileWithFileSize(largeFile, fileSize); /** * Upload the large file to storage blob. */ blobClient.uploadFromFile(largeFile.getPath()); /** * Download the large file from storage blob to the local downloadFile path. */ blobClient.downloadToFile(downloadFile.getPath()); /** * Check the files are same after the round-trip. */ if (Files.exists(downloadFile.toPath()) && Files.exists(largeFile.toPath())) { checkTwoFilesAreTheSame(largeFile, downloadFile); System.out.println("The file we upload is the same as the one we download."); } else { throw new RuntimeException("Did not find the upload or download file."); } /** * Clean up the local files and storage container. */ containerClient.delete(); Files.deleteIfExists(largeFile.toPath()); Files.deleteIfExists(downloadFile.toPath()); }
Long fileSize = 100 * 1024 * 1024L;
public static void main(String[] args) throws Exception { /** * From the Azure portal, get your Storage account's name and account key. */ String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endPoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. * Now you can use the storageClient to perform various container and blob operations. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endPoint).credential(credential).buildClient(); /** * This example shows several common operations just to get you started. */ /** * Create a client that references a to-be-created container in your Azure Storage account. This returns a * ContainerClient uses the same endpoint, credential and pipeline from storageClient. * Note that container names require lowercase. */ ContainerClient containerClient = storageClient.getContainerClient("myjavacontainerparallelupload" + System.currentTimeMillis()); /** * Create a container in Storage blob account. */ containerClient.create(); /** * Create a BlockBlobClient object that wraps a blob's endpoint and a default pipeline, the blockBlobClient give us access to upload the file. */ String filename = "BigFile.bin"; BlockBlobClient blobClient = containerClient.getBlockBlobClient(filename); /** * Create the empty uploadFile and downloadFile. */ File largeFile = createTempEmptyFile(filename); File downloadFile = createTempEmptyFile("downloadFile.bin"); /** * Generate random things to uploadFile, which makes the file with size of 100MB. 
*/ long fileSize = 100 * 1024 * 1024L; createTempFileWithFileSize(largeFile, fileSize); /** * Upload the large file to storage blob. */ blobClient.uploadFromFile(largeFile.getPath()); /** * Download the large file from storage blob to the local downloadFile path. */ blobClient.downloadToFile(downloadFile.getPath()); /** * Check the files are same after the round-trip. */ if (Files.exists(downloadFile.toPath()) && Files.exists(largeFile.toPath())) { checkTwoFilesAreTheSame(largeFile, downloadFile); System.out.println("The file we upload is the same as the one we download."); } else { throw new RuntimeException("Did not find the upload or download file."); } /** * Clean up the local files and storage container. */ containerClient.delete(); Files.deleteIfExists(largeFile.toPath()); Files.deleteIfExists(downloadFile.toPath()); }
class FileTransferExample { private static final String LARGE_TEST_FOLDER = "test-large-files/"; private static File createTempEmptyFile(String fileName) throws Exception{ URL folderUrl = FileTransferExample.class.getClassLoader().getResource("."); File dirPath = new File(folderUrl.getPath() + LARGE_TEST_FOLDER); if (dirPath.exists() || dirPath.mkdir()) { File f = new File(folderUrl.getPath() + LARGE_TEST_FOLDER + fileName); if (!f.exists()) { f.createNewFile(); } return f; } else { throw new RuntimeException("Failed to create the large file dir."); } } private static void createTempFileWithFileSize(File f, Long size) throws Exception { RandomAccessFile raf = new RandomAccessFile(f, "rw"); raf.setLength(size); raf.close(); } private static void checkTwoFilesAreTheSame(File f1, File f2) throws Exception { String checksumUpload = getFileChecksum(f1); String checksumDownload = getFileChecksum(f2); if (checksumUpload.equals(checksumDownload)) { throw new RuntimeException("The file upload does not match the file download."); } } private static String getFileChecksum(File file) throws Exception { MessageDigest md = MessageDigest.getInstance("MD5"); try (FileInputStream fis = new FileInputStream(file); FileChannel ch = fis.getChannel()) { final ByteBuffer buf = ByteBuffer.allocateDirect(8192); int b = ch.read(buf); while (b != -1 && b != 0) { buf.flip(); final byte[] bytes = new byte[b]; buf.get(bytes); md.update(bytes, 0, b); buf.clear(); b = ch.read(buf); } ch.close(); fis.close(); return md.digest().toString(); } } private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class FileTransferExample { private static final String LARGE_TEST_FOLDER = "test-large-files/"; private static File createTempEmptyFile(String fileName) throws Exception{ URL folderUrl = FileTransferExample.class.getClassLoader().getResource("."); File dirPath = new File(folderUrl.getPath() + LARGE_TEST_FOLDER); if (dirPath.exists() || dirPath.mkdir()) { File f = new File(folderUrl.getPath() + LARGE_TEST_FOLDER + fileName); if (!f.exists()) { f.createNewFile(); } return f; } else { throw new RuntimeException("Failed to create the large file dir."); } } private static void createTempFileWithFileSize(File f, long size) throws Exception { RandomAccessFile raf = new RandomAccessFile(f, "rw"); raf.setLength(size); raf.close(); } private static void checkTwoFilesAreTheSame(File f1, File f2) throws Exception { String checksumUpload = getFileChecksum(f1); String checksumDownload = getFileChecksum(f2); if (checksumUpload.equals(checksumDownload)) { throw new RuntimeException("The file upload does not match the file download."); } } private static String getFileChecksum(File file) throws Exception { MessageDigest md = MessageDigest.getInstance("MD5"); try (FileInputStream fis = new FileInputStream(file); FileChannel ch = fis.getChannel()) { final ByteBuffer buf = ByteBuffer.allocateDirect(8192); int b = ch.read(buf); while (b != -1 && b != 0) { buf.flip(); final byte[] bytes = new byte[b]; buf.get(bytes); md.update(bytes, 0, b); buf.clear(); b = ch.read(buf); } ch.close(); fis.close(); return md.digest().toString(); } } }
Should we show off StorageClient.deleteContainer here instead of ContainerClient.delete?
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
);
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
class ListContainersExample { private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class ListContainersExample { }
Especially if we already are at the storage client instead of the container client. We should, however, leave a comment saying that it's a shortcut for the code currently here.
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
);
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
class ListContainersExample { private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class ListContainersExample { }
I have tried to delete at the storage client level. I did not find the deleteContainer API. Are we supposed to have the API?
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
);
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
class ListContainersExample { private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class ListContainersExample { }
Huh, it does appear to be missing deleteContainer, open up an issue for that and we'll resolve. @jaschrep-msft and @rickle-msft good with resolving this in preview 2?
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
);
public static void main (String[] args) { String accountName = getAccountName(); String accountKey = getAccountKey(); /** * Use your Storage account's name and key to create a credential object; this is used to access your account. */ SharedKeyCredential credential = new SharedKeyCredential(accountName, accountKey); /** * From the Azure portal, get your Storage account blob service URL endpoint. * The URL typically looks like this: */ String endpoint = String.format(Locale.ROOT, "https: /** * Create a StorageClient object that wraps the service endpoint, credential and a request pipeline. */ StorageClient storageClient = StorageClient.storageClientBuilder().endpoint(endpoint).credential(credential).buildClient(); /** * Create 3 different containers from the storageClient. */ for (int i = 0; i < 3; i++) { storageClient.createContainer("mycontainersforlisting" + i + System.currentTimeMillis()); } /** * List the containers' name under the Azure storage account. */ storageClient.listContainers().forEach( containerItem -> { System.out.println("Container name: " + containerItem.name()); /** * Clean up the containers at the same time. */ storageClient.getContainerClient(containerItem.name()).delete(); } ); }
class ListContainersExample { private static String getAccountName() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_NAME"); } private static String getAccountKey() { return ConfigurationManager.getConfiguration().get("PRIMARY_STORAGE_ACCOUNT_KEY"); } }
class ListContainersExample { }
This first check can be rolled into your isValidLibrary method because it appears you're also doing a validity check in this if statement. ```java private String getInvalidTypeNameFromTypeArgument(DetailAST typeArgumentToken) { … if (identToken == null) { return null; } final String typeName = identToken.getText(); return isValidLibrary(typeName) ? null : typeName; } private boolean isValidLibrary(String typeName) { // If the qualified class name does not exist in the map, it implies the type is a primitive Java type (ie. int, long, etc). if (!classPathMap.containsKey(typeName)) { return true; } String qualifiedName = classPathMap.get(typeName); return VALID_DEPENDENCY_PACKAGE_NAMES.stream() .anyMatch(validPackageName -> qualifiedName.startsWith(validPackageName)); } ```
private String getInvalidTypeNameFromTypeArgument(DetailAST typeArgumentToken) { if (typeArgumentToken == null) { return null; } final DetailAST identToken = typeArgumentToken.findFirstToken(TokenTypes.IDENT); if (identToken != null) { final String typeName = identToken.getText(); if (classPathMap.containsKey(typeName) && !isValidLibrary(classPathMap.get(typeName))) { return typeName; } } return null; }
if (classPathMap.containsKey(typeName) && !isValidLibrary(classPathMap.get(typeName))) {
private String getInvalidTypeNameFromTypeArgument(DetailAST typeArgumentToken) { final DetailAST identToken = typeArgumentToken.findFirstToken(TokenTypes.IDENT); if (identToken == null) { return null; } final String typeName = identToken.getText(); return isValidClassDependency(typeName) ? null : typeName; }
class and value is the full package path of class. * * @param token the IMPORT AST node */ private void addImportedClassPath(DetailAST token) { final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(DOT) + 1); classPathMap.put(className, importClassPath); }
class and value is the full package path of class. final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(".") + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); break; case TokenTypes.METHOD_DEF: checkNoExternalDependencyExposed(token); break; default: break; } } /** * Checks for external dependency, log the error if it is an invalid external dependency. * * @param methodDefToken METHOD_DEF AST node */ private void checkNoExternalDependencyExposed(DetailAST methodDefToken) { final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (!accessModifier.equals(AccessModifier.PUBLIC) && !accessModifier.equals(AccessModifier.PROTECTED)) { return; } final DetailAST typeToken = methodDefToken.findFirstToken(TokenTypes.TYPE); if (typeToken != null) { getInvalidReturnTypes(typeToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); if (parametersToken != null) { getInvalidParameterTypes(parametersToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } }
You're not checking if the "library" is valid, you're checking if it's a valid class dependency. imho, a library is a collection of related classes
private String getInvalidTypeNameFromTypeArgument(DetailAST typeArgumentToken) { if (typeArgumentToken == null) { return null; } final DetailAST identToken = typeArgumentToken.findFirstToken(TokenTypes.IDENT); if (identToken != null) { final String typeName = identToken.getText(); if (classPathMap.containsKey(typeName) && !isValidLibrary(classPathMap.get(typeName))) { return typeName; } } return null; }
if (classPathMap.containsKey(typeName) && !isValidLibrary(classPathMap.get(typeName))) {
private String getInvalidTypeNameFromTypeArgument(DetailAST typeArgumentToken) { final DetailAST identToken = typeArgumentToken.findFirstToken(TokenTypes.IDENT); if (identToken == null) { return null; } final String typeName = identToken.getText(); return isValidClassDependency(typeName) ? null : typeName; }
class and value is the full package path of class. * * @param token the IMPORT AST node */ private void addImportedClassPath(DetailAST token) { final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(DOT) + 1); classPathMap.put(className, importClassPath); }
class and value is the full package path of class. final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(".") + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); break; case TokenTypes.METHOD_DEF: checkNoExternalDependencyExposed(token); break; default: break; } } /** * Checks for external dependency, log the error if it is an invalid external dependency. * * @param methodDefToken METHOD_DEF AST node */ private void checkNoExternalDependencyExposed(DetailAST methodDefToken) { final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (!accessModifier.equals(AccessModifier.PUBLIC) && !accessModifier.equals(AccessModifier.PROTECTED)) { return; } final DetailAST typeToken = methodDefToken.findFirstToken(TokenTypes.TYPE); if (typeToken != null) { getInvalidReturnTypes(typeToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); if (parametersToken != null) { getInvalidParameterTypes(parametersToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } }
I'd like to see some comments throughout your code saying what is being done, to make it easier for readers to determine. e.g. "Getting the modifier of the method to determine if it is public or protected"
private void checkNoExternalDependencyExposed(DetailAST methodDefToken) { final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); if (modifiersToken == null) { return; } AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (!accessModifier.equals(AccessModifier.PUBLIC) && !accessModifier.equals(AccessModifier.PROTECTED)) { return; } DetailAST typeToken = methodDefToken.findFirstToken(TokenTypes.TYPE); if (typeToken != null) { getInvalidReturnTypes(typeToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); if (parametersToken != null) { getInvalidParameterTypes(parametersToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } }
AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken);
private void checkNoExternalDependencyExposed(DetailAST methodDefToken) { final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (!accessModifier.equals(AccessModifier.PUBLIC) && !accessModifier.equals(AccessModifier.PROTECTED)) { return; } final DetailAST typeToken = methodDefToken.findFirstToken(TokenTypes.TYPE); if (typeToken != null) { getInvalidReturnTypes(typeToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } final DetailAST parametersToken = methodDefToken.findFirstToken(TokenTypes.PARAMETERS); if (parametersToken != null) { getInvalidParameterTypes(parametersToken).forEach( (token, returnTypeName) -> log(token, String.format(EXTERNAL_DEPENDENCY_ERROR, returnTypeName))); } }
class and value is the full package path of class. * * @param token the IMPORT AST node */ private void addImportedClassPath(DetailAST token) { final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(DOT) + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); }
class and value is the full package path of class. final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(".") + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); break; case TokenTypes.METHOD_DEF: checkNoExternalDependencyExposed(token); break; default: break; } } /** * Checks for external dependency, log the error if it is an invalid external dependency. * * @param methodDefToken METHOD_DEF AST node */
Rather than `@ServiceClient`, are you able to state the class name? Also, small wording suggestion: ```suggestion log(modifiersToken, String.format("The variable field ''%s'' of @ServiceClient should be final. Classes annotated with @ServiceClient are supposed to be immutable.", ```
private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of @ServiceClient should be final. The class annotated with @ServiceClient supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText())); } } }
log(modifiersToken, String.format("The variable field ''%s'' of @ServiceClient should be final. The class annotated with @ServiceClient supposed to be immutable.",
private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } }
class of ServiceClient. * These fields should be final because these classes supposed to be immutable class. * * @param objBlockToken the OBJBLOCK AST node */
class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */
```suggestion log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); ```
private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' should named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s should named <ServiceName>Client.", className)); } }
log(classDefToken, String.format("Async class ''%s'' should named <ServiceName>AsyncClient ", className));
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } }
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
```suggestion log(classDefToken, String.format("Sync class %s should named <ServiceName>Client.", className)); ``` ```suggestion log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); ```
private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' should named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s should named <ServiceName>Client.", className)); } }
log(classDefToken, String.format("Sync class %s should named <ServiceName>Client.", className));
private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } }
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */
I would add pre-emptive checks on these constructor and method_def checks that: ```java if (hasServiceClientAnnotation) { // then do the check. Otherwise, you're doing the check on classes that aren't serviceclients. } ```
public void visitToken(DetailAST token) { if (!hasServiceClientAnnotation) { return; } switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: checkConstructor(token); break; case TokenTypes.METHOD_DEF: checkMethodName(token); break; case TokenTypes.OBJBLOCK: checkClassField(token); break; default: break; } }
checkConstructor(token);
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = true; isAsync = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks for the variable field of the subclass of ServiceClient. * These fields should be final because these classes supposed to be immutable class. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
The condition is already checked in at beginning of visitToken()
public void visitToken(DetailAST token) { if (!hasServiceClientAnnotation) { return; } switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: checkConstructor(token); break; case TokenTypes.METHOD_DEF: checkMethodName(token); break; case TokenTypes.OBJBLOCK: checkClassField(token); break; default: break; } }
checkConstructor(token);
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = true; isAsync = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks for the variable field of the subclass of ServiceClient. * These fields should be final because these classes supposed to be immutable class. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
Seems odd you would pre-emptively set the field to "true" when beginning a walk because you don't really know if that file has a ServiceClient annotation, yet.
public void visitToken(DetailAST token) { if (!hasServiceClientAnnotation) { return; } switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: checkConstructor(token); break; case TokenTypes.METHOD_DEF: checkMethodName(token); break; case TokenTypes.OBJBLOCK: checkClassField(token); break; default: break; } }
checkConstructor(token);
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = true; isAsync = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks for the variable field of the subclass of ServiceClient. * These fields should be final because these classes supposed to be immutable class. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
Setting 'true' value to be able to walk the tree, The DFS tree traversal will first visit CLASS_DEF. If the class is annotated with @ServiceClient, the hasServiceClientAnnotation() method will return true, otherwise, it will return false.
public void visitToken(DetailAST token) { if (!hasServiceClientAnnotation) { return; } switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: checkConstructor(token); break; case TokenTypes.METHOD_DEF: checkMethodName(token); break; case TokenTypes.OBJBLOCK: checkClassField(token); break; default: break; } }
checkConstructor(token);
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = true; isAsync = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks for the variable field of the subclass of ServiceClient. * These fields should be final because these classes supposed to be immutable class. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
I have made a wrong assumption. I thought one tree traversal will only have one CLASS_DEF. But it could have a nest inner class.
public void visitToken(DetailAST token) { if (!hasServiceClientAnnotation) { return; } switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); checkServiceClientNaming(token); break; case TokenTypes.CTOR_DEF: checkConstructor(token); break; case TokenTypes.METHOD_DEF: checkMethodName(token); break; case TokenTypes.OBJBLOCK: checkClassField(token); break; default: break; } }
checkConstructor(token);
public void visitToken(DetailAST token) { if (isImplPackage) { return; } switch (token.getType()) { case TokenTypes.PACKAGE_DEF: String packageName = FullIdent.createFullIdent(token.findFirstToken(TokenTypes.DOT)).getText(); isImplPackage = packageName.contains(".implementation"); break; case TokenTypes.CLASS_DEF: hasServiceClientAnnotation = hasServiceClientAnnotation(token); if (hasServiceClientAnnotation) { checkServiceClientNaming(token); } break; case TokenTypes.CTOR_DEF: if (hasServiceClientAnnotation) { checkConstructor(token); } break; case TokenTypes.METHOD_DEF: if (hasServiceClientAnnotation) { checkMethodName(token); } break; case TokenTypes.OBJBLOCK: if (hasServiceClientAnnotation) { checkClassField(token); } break; default: break; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = true; isAsync = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks for the variable field of the subclass of ServiceClient. * These fields should be final because these classes supposed to be immutable class. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { if (!hasServiceClientAnnotation) { return; } final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
class ServiceClientInstantiationCheck extends AbstractCheck { private static final String SERVICE_CLIENT = "ServiceClient"; private static final String BUILDER = "builder"; private static final String ASYNC_CLIENT ="AsyncClient"; private static final String CLIENT = "Client"; private static final String IS_ASYNC = "isAsync"; private static boolean hasServiceClientAnnotation; private static boolean isAsync; private static boolean isImplPackage; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.PACKAGE_DEF, TokenTypes.CLASS_DEF, TokenTypes.CTOR_DEF, TokenTypes.METHOD_DEF, TokenTypes.OBJBLOCK }; } @Override public void beginTree(DetailAST root) { hasServiceClientAnnotation = false; isAsync = false; isImplPackage = false; } @Override /** * Checks if the class is annotated with annotation @ServiceClient. A class could have multiple annotations. * * @param classDefToken the CLASS_DEF AST node * @return true if the class is annotated with @ServiceClient, false otherwise. */ private boolean hasServiceClientAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } final DetailAST annotationIdent = ast.findFirstToken(TokenTypes.IDENT); if (annotationIdent != null && SERVICE_CLIENT.equals(annotationIdent.getText())) { isAsync = isAsyncServiceClient(ast); return true; } } return false; } /** * Checks for public or protected constructor for the service client class. * Log error if the service client has public or protected constructor. 
* * @param ctorToken the CTOR_DEF AST node */ private void checkConstructor(DetailAST ctorToken) { final DetailAST modifiersToken = ctorToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) || accessModifier.equals(AccessModifier.PROTECTED)) { log(modifiersToken, "@ServiceClient class should not have any public or protected constructor."); } } /** * Checks for public static method named 'builder'. Should avoid to use method name, 'builder'. * * @param methodDefToken the METHOD_DEF AST node */ private void checkMethodName(DetailAST methodDefToken) { final DetailAST methodNameToken = methodDefToken.findFirstToken(TokenTypes.IDENT); if (!BUILDER.equals(methodNameToken.getText())) { return; } final DetailAST modifiersToken = methodDefToken.findFirstToken(TokenTypes.MODIFIERS); final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken); if (accessModifier.equals(AccessModifier.PUBLIC) && modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)) { log(modifiersToken, "@ServiceClient class should not have a public static method named ''builder''."); } } /** * Checks that the field variables in the @ServiceClient are final. ServiceClients should be immutable. * * @param objBlockToken the OBJBLOCK AST node */ private void checkClassField(DetailAST objBlockToken) { for (DetailAST ast = objBlockToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (TokenTypes.VARIABLE_DEF != ast.getType()) { continue; } final DetailAST modifiersToken = ast.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.FINAL)) { log(modifiersToken, String.format("The variable field ''%s'' of class ''%s'' should be final. 
Classes annotated with @ServiceClient are supposed to be immutable.", ast.findFirstToken(TokenTypes.IDENT).getText(), objBlockToken.getPreviousSibling().getText())); } } } /** * Checks for the class name of Service Client. It should be named <ServiceName>AsyncClient or <ServiceName>Client. * * @param classDefToken the CLASS_DEF AST node */ private void checkServiceClientNaming(DetailAST classDefToken) { final String className = classDefToken.findFirstToken(TokenTypes.IDENT).getText(); if (isAsync && !className.endsWith(ASYNC_CLIENT)) { log(classDefToken, String.format("Async class ''%s'' must be named <ServiceName>AsyncClient ", className)); } if (!isAsync && !className.endsWith(CLIENT)) { log(classDefToken, String.format("Sync class %s must be named <ServiceName>Client.", className)); } } /** * A function checks if the annotation node has a member key is {@code IS_ASYNC} with value equals to 'true'. * If the value equals 'true', which indicates the @ServiceClient is an asynchronous client. * If the member pair is missing. By default, it is a synchronous service client. * * @param annotationToken the ANNOTATION AST node * @return true if the annotation has {@code IS_ASYNC} value 'true', otherwise, false. */ private boolean isAsyncServiceClient(DetailAST annotationToken) { for (DetailAST ast = annotationToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { continue; } final DetailAST identToken = ast.findFirstToken(TokenTypes.IDENT); if (identToken == null) { continue; } if (!IS_ASYNC.equals(identToken.getText())) { continue; } final DetailAST exprToken = ast.findFirstToken(TokenTypes.EXPR); if (exprToken == null) { continue; } return exprToken.branchContains(TokenTypes.LITERAL_TRUE); } return false; } }
You can merge these two `if` conditions into one
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } for (DetailAST annotationChild = ast.getFirstChild(); annotationChild != null; annotationChild = annotationChild.getNextSibling()) { if (annotationChild.getType() == TokenTypes.IDENT) { if (!"ServiceInterface".equals(annotationChild.getText())) { break; } else { serviceInterfaceAnnotationNode = ast; } } if (annotationChild.getType() == TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { if ("name".equals(annotationChild.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationChild.findFirstToken(TokenTypes.EXPR)); } } } } if (serviceInterfaceAnnotationNode == null) { return; } if (nameValue.isEmpty()) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not be empty.", nameValue)); } if (nameValue.contains(" ")) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not contain white space.", nameValue)); } if (nameValue.length() > 10) { log(serviceInterfaceAnnotationNode, "[DEBUG] length = " + nameValue.length() + ", name = " + nameValue); log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface ''%s'' should not have a length > 10.", nameValue)); } }
if ("name".equals(annotationChild.findFirstToken(TokenTypes.IDENT).getText())) {
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } if (!"ServiceInterface".equals(ast.findFirstToken(TokenTypes.IDENT).getText())) { continue; } serviceInterfaceAnnotationNode = ast; DetailAST annotationMemberValuePairToken = ast.findFirstToken(TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR); if ("name".equals(annotationMemberValuePairToken.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationMemberValuePairToken.findFirstToken(TokenTypes.EXPR)); break; } } if (serviceInterfaceAnnotationNode == null) { return; } Pattern serviceNamePattern = Pattern.compile("^[a-zA-Z0-9]{1,20}$"); if (!serviceNamePattern.matcher(nameValue).find()) { log(serviceInterfaceAnnotationNode, String.format( "The ''name'' property of @ServiceInterface, ''%s'' should be non-empty, alphanumeric and not more than 10 characters", nameValue)); } }
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 10 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 20 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
You can `break` after you have found the `nameValue`
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } for (DetailAST annotationChild = ast.getFirstChild(); annotationChild != null; annotationChild = annotationChild.getNextSibling()) { if (annotationChild.getType() == TokenTypes.IDENT) { if (!"ServiceInterface".equals(annotationChild.getText())) { break; } else { serviceInterfaceAnnotationNode = ast; } } if (annotationChild.getType() == TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { if ("name".equals(annotationChild.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationChild.findFirstToken(TokenTypes.EXPR)); } } } } if (serviceInterfaceAnnotationNode == null) { return; } if (nameValue.isEmpty()) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not be empty.", nameValue)); } if (nameValue.contains(" ")) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not contain white space.", nameValue)); } if (nameValue.length() > 10) { log(serviceInterfaceAnnotationNode, "[DEBUG] length = " + nameValue.length() + ", name = " + nameValue); log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface ''%s'' should not have a length > 10.", nameValue)); } }
nameValue = getNamePropertyValue(annotationChild.findFirstToken(TokenTypes.EXPR));
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } if (!"ServiceInterface".equals(ast.findFirstToken(TokenTypes.IDENT).getText())) { continue; } serviceInterfaceAnnotationNode = ast; DetailAST annotationMemberValuePairToken = ast.findFirstToken(TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR); if ("name".equals(annotationMemberValuePairToken.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationMemberValuePairToken.findFirstToken(TokenTypes.EXPR)); break; } } if (serviceInterfaceAnnotationNode == null) { return; } Pattern serviceNamePattern = Pattern.compile("^[a-zA-Z0-9]{1,20}$"); if (!serviceNamePattern.matcher(nameValue).find()) { log(serviceInterfaceAnnotationNode, String.format( "The ''name'' property of @ServiceInterface, ''%s'' should be non-empty, alphanumeric and not more than 10 characters", nameValue)); } }
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 10 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 20 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
I think you can use a simple regex to check all these 3 rules at once. ``` Pattern serviceNamePattern = Pattern.compile("^[a-zA-Z0-9]{1,10}$"); if (!serviceNamePattern.matcher(nameValue).find()) { log(serviceInterfaceAnnotationNode, String.format( "The ''name'' property of @ServiceInterface, ''%s'' should be non-empty, alphanumeric and not more than 10 characters", nameValue)); } ```
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } for (DetailAST annotationChild = ast.getFirstChild(); annotationChild != null; annotationChild = annotationChild.getNextSibling()) { if (annotationChild.getType() == TokenTypes.IDENT) { if (!"ServiceInterface".equals(annotationChild.getText())) { break; } else { serviceInterfaceAnnotationNode = ast; } } if (annotationChild.getType() == TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { if ("name".equals(annotationChild.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationChild.findFirstToken(TokenTypes.EXPR)); } } } } if (serviceInterfaceAnnotationNode == null) { return; } if (nameValue.isEmpty()) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not be empty.", nameValue)); } if (nameValue.contains(" ")) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not contain white space.", nameValue)); } if (nameValue.length() > 10) { log(serviceInterfaceAnnotationNode, "[DEBUG] length = " + nameValue.length() + ", name = " + nameValue); log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface ''%s'' should not have a length > 10.", nameValue)); } }
if (nameValue.isEmpty()) {
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } if (!"ServiceInterface".equals(ast.findFirstToken(TokenTypes.IDENT).getText())) { continue; } serviceInterfaceAnnotationNode = ast; DetailAST annotationMemberValuePairToken = ast.findFirstToken(TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR); if ("name".equals(annotationMemberValuePairToken.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationMemberValuePairToken.findFirstToken(TokenTypes.EXPR)); break; } } if (serviceInterfaceAnnotationNode == null) { return; } Pattern serviceNamePattern = Pattern.compile("^[a-zA-Z0-9]{1,20}$"); if (!serviceNamePattern.matcher(nameValue).find()) { log(serviceInterfaceAnnotationNode, String.format( "The ''name'' property of @ServiceInterface, ''%s'' should be non-empty, alphanumeric and not more than 10 characters", nameValue)); } }
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 10 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 20 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
Consider extracting some parts of this for loop into smaller methods for better readability.
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } for (DetailAST annotationChild = ast.getFirstChild(); annotationChild != null; annotationChild = annotationChild.getNextSibling()) { if (annotationChild.getType() == TokenTypes.IDENT) { if (!"ServiceInterface".equals(annotationChild.getText())) { break; } else { serviceInterfaceAnnotationNode = ast; } } if (annotationChild.getType() == TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR) { if ("name".equals(annotationChild.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationChild.findFirstToken(TokenTypes.EXPR)); } } } } if (serviceInterfaceAnnotationNode == null) { return; } if (nameValue.isEmpty()) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not be empty.", nameValue)); } if (nameValue.contains(" ")) { log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface, ''%s'' should not contain white space.", nameValue)); } if (nameValue.length() > 10) { log(serviceInterfaceAnnotationNode, "[DEBUG] length = " + nameValue.length() + ", name = " + nameValue); log(serviceInterfaceAnnotationNode, String.format("The ''name'' property of @ServiceInterface ''%s'' should not have a length > 10.", nameValue)); } }
for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) {
private void checkServiceInterface(DetailAST interfaceDefToken) { DetailAST serviceInterfaceAnnotationNode = null; String nameValue = null; DetailAST modifiersToken = interfaceDefToken.findFirstToken(TokenTypes.MODIFIERS); for (DetailAST ast = modifiersToken.getFirstChild(); ast != null; ast = ast.getNextSibling()) { if (ast.getType() != TokenTypes.ANNOTATION) { continue; } if (!"ServiceInterface".equals(ast.findFirstToken(TokenTypes.IDENT).getText())) { continue; } serviceInterfaceAnnotationNode = ast; DetailAST annotationMemberValuePairToken = ast.findFirstToken(TokenTypes.ANNOTATION_MEMBER_VALUE_PAIR); if ("name".equals(annotationMemberValuePairToken.findFirstToken(TokenTypes.IDENT).getText())) { nameValue = getNamePropertyValue(annotationMemberValuePairToken.findFirstToken(TokenTypes.EXPR)); break; } } if (serviceInterfaceAnnotationNode == null) { return; } Pattern serviceNamePattern = Pattern.compile("^[a-zA-Z0-9]{1,20}$"); if (!serviceNamePattern.matcher(nameValue).find()) { log(serviceInterfaceAnnotationNode, String.format( "The ''name'' property of @ServiceInterface, ''%s'' should be non-empty, alphanumeric and not more than 10 characters", nameValue)); } }
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 10 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
class should have the following rules: * 1) The annotation property 'name' should be non-empty * 2) The length of value of property 'name' should be less than 20 characters and without space * * @param interfaceDefToken INTERFACE_DEF AST node */
There should also be a check to ensure there is at least one method that starts with "build".
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientBuilderAnnotationStack.push(hasServiceClientBuilderAnnotation); final DetailAST serviceClientAnnotationBuilderToken = getServiceClientBuilderAnnotation(token); final String className = token.findFirstToken(TokenTypes.IDENT).getText(); hasServiceClientBuilderAnnotation = serviceClientAnnotationBuilderToken != null; if (hasServiceClientBuilderAnnotation) { if (!className.endsWith("ClientBuilder")) { log(token, String.format("Class annotated with @ServiceClientBuilder ''%s'' should be named <ServiceName>ClientBuilder.", className)); } } else { if (className.endsWith("ClientBuilder")) { log(token, String.format("Class ''%s'' should be annotated with @ServiceClientBuilder.", className)); } } break; case TokenTypes.METHOD_DEF: if (!hasServiceClientBuilderAnnotation) { return; } final String methodName = token.findFirstToken(TokenTypes.IDENT).getText(); if (methodName.startsWith("build") && !BUILD_ASYNC_CLIENT.equals(methodName) && !BUILD_CLIENT.equals(methodName)) { log(token, String.format( "@ServiceClientBuilder class should not have a method name, '''' starting with ''build'' " + "other than ''buildClient'' or ''buildAsyncClient''." , methodName)); } break; default: break; } }
if (methodName.startsWith("build") && !BUILD_ASYNC_CLIENT.equals(methodName) && !BUILD_CLIENT.equals(methodName)) {
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientBuilderAnnotationStack.push(hasServiceClientBuilderAnnotation); hasBuildMethodStack.push(hasBuildMethod); final DetailAST serviceClientAnnotationBuilderToken = getServiceClientBuilderAnnotation(token); final String className = token.findFirstToken(TokenTypes.IDENT).getText(); hasServiceClientBuilderAnnotation = serviceClientAnnotationBuilderToken != null; if (hasServiceClientBuilderAnnotation) { if (!className.endsWith("ClientBuilder")) { log(token, String.format("Class annotated with @ServiceClientBuilder ''%s'' should be named <ServiceName>ClientBuilder.", className)); } } else { if (className.endsWith("ClientBuilder")) { log(token, String.format("Class ''%s'' should be annotated with @ServiceClientBuilder.", className)); } } break; case TokenTypes.METHOD_DEF: if (!hasServiceClientBuilderAnnotation) { return; } final String methodName = token.findFirstToken(TokenTypes.IDENT).getText(); if (!methodName.startsWith("build")) { break; } hasBuildMethod = true; if (!methodName.endsWith("Client")) { log(token, String.format( "@ServiceClientBuilder class should not have a method name, ''%s'' starting with ''build'' but not ending with ''Client''." , methodName)); } break; default: break; } }
class ServiceClientBuilderCheck extends AbstractCheck { private static final String SERVICE_CLIENT_BUILDER = "ServiceClientBuilder"; private static final String BUILD_CLIENT = "buildClient"; private static final String BUILD_ASYNC_CLIENT = "buildAsyncClient"; private Stack<Boolean> hasServiceClientBuilderAnnotationStack = new Stack(); private boolean hasServiceClientBuilderAnnotation; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF }; } @Override public void leaveToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientBuilderAnnotation = hasServiceClientBuilderAnnotationStack.pop(); break; default: break; } } @Override /** * Checks if the class is annotated with @ServiceClientBuilder. * * @param classDefToken the CLASS_DEF AST node * @return the annotation node if the class is annotated with @ServiceClientBuilder, null otherwise. */ private DetailAST getServiceClientBuilderAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.ANNOTATION)) { return null; } DetailAST annotationToken = modifiersToken.findFirstToken(TokenTypes.ANNOTATION); if (!SERVICE_CLIENT_BUILDER.equals(annotationToken.findFirstToken(TokenTypes.IDENT).getText())) { return null; } return annotationToken; } }
class ServiceClientBuilderCheck extends AbstractCheck { private static final String SERVICE_CLIENT_BUILDER = "ServiceClientBuilder"; private Stack<Boolean> hasServiceClientBuilderAnnotationStack = new Stack(); private Stack<Boolean> hasBuildMethodStack = new Stack<>(); private boolean hasServiceClientBuilderAnnotation; private boolean hasBuildMethod; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF }; } @Override public void leaveToken(DetailAST token) { if (token.getType() == TokenTypes.CLASS_DEF) { hasServiceClientBuilderAnnotation = hasServiceClientBuilderAnnotationStack.pop(); hasBuildMethod = hasBuildMethodStack.pop(); if (hasServiceClientBuilderAnnotation && !hasBuildMethod) { log(token, "Class with @ServiceClientBuilder annotation must have a method starting with ''build'' and ending with ''Client''."); } } } @Override /** * Checks if the class is annotated with @ServiceClientBuilder. * * @param classDefToken the CLASS_DEF AST node * @return the annotation node if the class is annotated with @ServiceClientBuilder, null otherwise. */ private DetailAST getServiceClientBuilderAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.ANNOTATION)) { return null; } DetailAST annotationToken = modifiersToken.findFirstToken(TokenTypes.ANNOTATION); if (!SERVICE_CLIENT_BUILDER.equals(annotationToken.findFirstToken(TokenTypes.IDENT).getText())) { return null; } return annotationToken; } }
Same here. Only one `case` here - replace `switch` with `if`.
public void leaveToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientBuilderAnnotation = hasServiceClientBuilderAnnotationStack.pop(); break; default: break; } }
case TokenTypes.CLASS_DEF:
public void leaveToken(DetailAST token) { if (token.getType() == TokenTypes.CLASS_DEF) { hasServiceClientBuilderAnnotation = hasServiceClientBuilderAnnotationStack.pop(); hasBuildMethod = hasBuildMethodStack.pop(); if (hasServiceClientBuilderAnnotation && !hasBuildMethod) { log(token, "Class with @ServiceClientBuilder annotation must have a method starting with ''build'' and ending with ''Client''."); } } }
class ServiceClientBuilderCheck extends AbstractCheck { private static final String SERVICE_CLIENT_BUILDER = "ServiceClientBuilder"; private static final String BUILD_CLIENT = "buildClient"; private static final String BUILD_ASYNC_CLIENT = "buildAsyncClient"; private Stack<Boolean> hasServiceClientBuilderAnnotationStack = new Stack(); private boolean hasServiceClientBuilderAnnotation; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF }; } @Override @Override public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientBuilderAnnotationStack.push(hasServiceClientBuilderAnnotation); final DetailAST serviceClientAnnotationBuilderToken = getServiceClientBuilderAnnotation(token); final String className = token.findFirstToken(TokenTypes.IDENT).getText(); hasServiceClientBuilderAnnotation = serviceClientAnnotationBuilderToken != null; if (hasServiceClientBuilderAnnotation) { if (!className.endsWith("ClientBuilder")) { log(token, String.format("Class annotated with @ServiceClientBuilder ''%s'' should be named <ServiceName>ClientBuilder.", className)); } } else { if (className.endsWith("ClientBuilder")) { log(token, String.format("Class ''%s'' should be annotated with @ServiceClientBuilder.", className)); } } break; case TokenTypes.METHOD_DEF: if (!hasServiceClientBuilderAnnotation) { return; } final String methodName = token.findFirstToken(TokenTypes.IDENT).getText(); if (methodName.startsWith("build") && !BUILD_ASYNC_CLIENT.equals(methodName) && !BUILD_CLIENT.equals(methodName)) { log(token, String.format( "@ServiceClientBuilder class should not have a method name, '''' starting with ''build'' " + "other than ''buildClient'' or ''buildAsyncClient''." 
, methodName)); } break; default: break; } } /** * Checks if the class is annotated with @ServiceClientBuilder. * * @param classDefToken the CLASS_DEF AST node * @return the annotation node if the class is annotated with @ServiceClientBuilder, null otherwise. */ private DetailAST getServiceClientBuilderAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.ANNOTATION)) { return null; } DetailAST annotationToken = modifiersToken.findFirstToken(TokenTypes.ANNOTATION); if (!SERVICE_CLIENT_BUILDER.equals(annotationToken.findFirstToken(TokenTypes.IDENT).getText())) { return null; } return annotationToken; } }
class ServiceClientBuilderCheck extends AbstractCheck { private static final String SERVICE_CLIENT_BUILDER = "ServiceClientBuilder"; private Stack<Boolean> hasServiceClientBuilderAnnotationStack = new Stack(); private Stack<Boolean> hasBuildMethodStack = new Stack<>(); private boolean hasServiceClientBuilderAnnotation; private boolean hasBuildMethod; @Override public int[] getDefaultTokens() { return getRequiredTokens(); } @Override public int[] getAcceptableTokens() { return getRequiredTokens(); } @Override public int[] getRequiredTokens() { return new int[] { TokenTypes.CLASS_DEF, TokenTypes.METHOD_DEF }; } @Override @Override public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: hasServiceClientBuilderAnnotationStack.push(hasServiceClientBuilderAnnotation); hasBuildMethodStack.push(hasBuildMethod); final DetailAST serviceClientAnnotationBuilderToken = getServiceClientBuilderAnnotation(token); final String className = token.findFirstToken(TokenTypes.IDENT).getText(); hasServiceClientBuilderAnnotation = serviceClientAnnotationBuilderToken != null; if (hasServiceClientBuilderAnnotation) { if (!className.endsWith("ClientBuilder")) { log(token, String.format("Class annotated with @ServiceClientBuilder ''%s'' should be named <ServiceName>ClientBuilder.", className)); } } else { if (className.endsWith("ClientBuilder")) { log(token, String.format("Class ''%s'' should be annotated with @ServiceClientBuilder.", className)); } } break; case TokenTypes.METHOD_DEF: if (!hasServiceClientBuilderAnnotation) { return; } final String methodName = token.findFirstToken(TokenTypes.IDENT).getText(); if (!methodName.startsWith("build")) { break; } hasBuildMethod = true; if (!methodName.endsWith("Client")) { log(token, String.format( "@ServiceClientBuilder class should not have a method name, ''%s'' starting with ''build'' but not ending with ''Client''." 
, methodName)); } break; default: break; } } /** * Checks if the class is annotated with @ServiceClientBuilder. * * @param classDefToken the CLASS_DEF AST node * @return the annotation node if the class is annotated with @ServiceClientBuilder, null otherwise. */ private DetailAST getServiceClientBuilderAnnotation(DetailAST classDefToken) { final DetailAST modifiersToken = classDefToken.findFirstToken(TokenTypes.MODIFIERS); if (!modifiersToken.branchContains(TokenTypes.ANNOTATION)) { return null; } DetailAST annotationToken = modifiersToken.findFirstToken(TokenTypes.ANNOTATION); if (!SERVICE_CLIENT_BUILDER.equals(annotationToken.findFirstToken(TokenTypes.IDENT).getText())) { return null; } return annotationToken; } }
Should look into having a ConnectionStringParser class in Azure Core, I've been seeing this functionality in a lot of places.
private void getEndPointFromConnectionString(String connectionString) { HashMap<String, String> connectionStringPieces = new HashMap<>(); for (String connectionStringPiece : connectionString.split(";")) { String[] kvp = connectionStringPiece.split("=", 2); connectionStringPieces.put(kvp[0].toLowerCase(Locale.ROOT), kvp[1]); } String accountName = connectionStringPieces.get(ACCOUNT_NAME); try { this.endpoint = new URL(String.format("https: } catch (MalformedURLException e) { throw new IllegalArgumentException(String.format("There is no valid account for the connection string. " + "Connection String: %s", connectionString)); } }
for (String connectionStringPiece : connectionString.split(";")) {
private void getEndPointFromConnectionString(String connectionString) { HashMap<String, String> connectionStringPieces = new HashMap<>(); for (String connectionStringPiece : connectionString.split(";")) { String[] kvp = connectionStringPiece.split("=", 2); connectionStringPieces.put(kvp[0].toLowerCase(Locale.ROOT), kvp[1]); } String accountName = connectionStringPieces.get(ACCOUNT_NAME); try { this.endpoint = new URL(String.format("https: } catch (MalformedURLException e) { LOGGER.asError().log("There is no valid account for the connection string. " + "Connection String: %s", connectionString); throw new IllegalArgumentException(String.format("There is no valid account for the connection string. " + "Connection String: %s", connectionString)); } }
class QueueClientBuilder { private static final String ACCOUNT_NAME = "accountname"; private final List<HttpPipelinePolicy> policies; private URL endpoint; private String queueName; private SASTokenCredential sasTokenCredential; private SharedKeyCredential sharedKeyCredential; private HttpClient httpClient; private HttpPipeline pipeline; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a builder instance that is able to configure and construct {@link QueueClient QueueClients} * and {@link QueueAsyncClient QueueAsyncClients}. */ public QueueClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); configuration = ConfigurationManager.getConfiguration(); } /** * Creates a {@link QueueClient} based on options set in the builder. Every time {@code buildClient()} is * called a new instance of {@link QueueClient} is created. * * <p> * If {@link QueueClientBuilder * {@link QueueClientBuilder * {@link QueueClientBuilder * All other builder settings are ignored. * </p> * * @return A QueueClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} or {@code queueName} have not been set. * @throws IllegalStateException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. */ public QueueClient buildClient() { return new QueueClient(buildAsyncClient()); } /** * Creates a {@link QueueAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link QueueAsyncClient} is created. * * <p> * If {@link QueueClientBuilder * {@link QueueClientBuilder * {@link QueueClientBuilder * All other builder settings are ignored. * </p> * * @return A QueueAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} or {@code queueName} have not been set. 
* @throws IllegalArgumentException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. */ public QueueAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint); Objects.requireNonNull(queueName); if (pipeline != null) { return new QueueAsyncClient(endpoint, pipeline, queueName); } if (sasTokenCredential == null && sharedKeyCredential == null) { throw new IllegalArgumentException("Credentials are required for authorization"); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(QueueConfiguration.NAME, QueueConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new QueueAsyncClient(endpoint, pipeline, queueName); } /** * Sets the endpoint for the Azure Storage Queue instance that the client will interact with. * * <p>The first path segment, if the endpoint contains path segments, will be assumed to be the name of the queue * that the client will interact with.</p> * * <p>Query parameters of the endpoint will be parsed using {@link SASTokenCredential * attempt to generate a {@link SASTokenCredential} to authenticate requests sent to the service.</p> * * @param endpoint The URL of the Azure Storage Queue instance to send service requests to and receive responses from. 
* @return the updated QueueClientBuilder object * @throws IllegalArgumentException If {@code endpoint} isn't a proper URL */ public QueueClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { URL fullURL = new URL(endpoint); this.endpoint = new URL(fullURL.getProtocol() + ": String[] pathSegments = fullURL.getPath().split("/", 2); if (pathSegments.length == 2 && !ImplUtils.isNullOrEmpty(pathSegments[1])) { this.queueName = pathSegments[1]; } SASTokenCredential credential = SASTokenCredential.fromQuery(fullURL.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } } catch (MalformedURLException ex) { throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed."); } return this; } /** * Sets the name of the queue that the client will interact with. * * @param queueName Name of the queue * @return the updated QueueClientBuilder object * @throws NullPointerException If {@code queueName} is {@code null}. */ public QueueClientBuilder queueName(String queueName) { this.queueName = Objects.requireNonNull(queueName); return this; } /** * Sets the {@link SASTokenCredential} used to authenticate requests sent to the Queue service. * * @param credential SAS token credential generated from the Storage account that authorizes requests * @return the updated QueueClientBuilder object * @throws NullPointerException If {@code credential} is {@code null}. */ public QueueClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = Objects.requireNonNull(credential); return this; } /** * Creates a {@link SharedKeyCredential} from the {@code connectionString} used to authenticate requests sent to the * Queue service. * * @param connectionString Connection string from the Access Keys section in the Storage account * @return the updated QueueClientBuilder object * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ public QueueClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); this.sharedKeyCredential = SharedKeyCredential.fromConnectionString(connectionString); Objects.requireNonNull(connectionString); this.sharedKeyCredential = SharedKeyCredential.fromConnectionString(connectionString); getEndPointFromConnectionString(connectionString); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated QueueClientBuilder object. * @throws NullPointerException If {@code httpClient} is {@code null}. */ public QueueClientBuilder httpClient(HttpClient httpClient) { this.httpClient = Objects.requireNonNull(httpClient); return this; } /** * Adds a policy to the set of existing policies that are executed after the {@link RetryPolicy}. * * @param pipelinePolicy The retry policy for service requests. * @return The updated QueueClientBuilder object. * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public QueueClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy); this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated QueueClientBuilder object. */ public QueueClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link QueueClientBuilder * and {@link QueueClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated QueueClientBuilder object. * @throws NullPointerException If {@code pipeline} is {@code null}. 
*/ public QueueClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated QueueClientBuilder object. */ public QueueClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
class QueueClientBuilder { private static final ClientLogger LOGGER = new ClientLogger(QueueClientBuilder.class); private static final String ACCOUNT_NAME = "accountname"; private final List<HttpPipelinePolicy> policies; private URL endpoint; private String queueName; private SASTokenCredential sasTokenCredential; private SharedKeyCredential sharedKeyCredential; private HttpClient httpClient; private HttpPipeline pipeline; private HttpLogDetailLevel logLevel; private RetryPolicy retryPolicy; private Configuration configuration; /** * Creates a builder instance that is able to configure and construct {@link QueueClient QueueClients} * and {@link QueueAsyncClient QueueAsyncClients}. */ public QueueClientBuilder() { retryPolicy = new RetryPolicy(); logLevel = HttpLogDetailLevel.NONE; policies = new ArrayList<>(); configuration = ConfigurationManager.getConfiguration(); } /** * Creates a {@link QueueClient} based on options set in the builder. Every time {@code buildClient()} is * called a new instance of {@link QueueClient} is created. * * <p> * If {@link QueueClientBuilder * {@link QueueClientBuilder * {@link QueueClientBuilder * All other builder settings are ignored. * </p> * * @return A QueueClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} or {@code queueName} have not been set. * @throws IllegalStateException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. */ public QueueClient buildClient() { return new QueueClient(buildAsyncClient()); } /** * Creates a {@link QueueAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link QueueAsyncClient} is created. * * <p> * If {@link QueueClientBuilder * {@link QueueClientBuilder * {@link QueueClientBuilder * All other builder settings are ignored. * </p> * * @return A QueueAsyncClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} or {@code queueName} have not been set. * @throws IllegalArgumentException If neither a {@link SharedKeyCredential} or {@link SASTokenCredential} has been set. */ public QueueAsyncClient buildAsyncClient() { Objects.requireNonNull(endpoint); Objects.requireNonNull(queueName); if (sasTokenCredential == null && sharedKeyCredential == null) { LOGGER.asError().log("Credentials are required for authorization"); throw new IllegalArgumentException("Credentials are required for authorization"); } if (pipeline != null) { return new QueueAsyncClient(endpoint, pipeline, queueName); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(QueueConfiguration.NAME, QueueConfiguration.VERSION, configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); if (sharedKeyCredential != null) { policies.add(new SharedKeyCredentialPolicy(sharedKeyCredential)); } else { policies.add(new SASTokenCredentialPolicy(sasTokenCredential)); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(logLevel)); HttpPipeline pipeline = HttpPipeline.builder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new QueueAsyncClient(endpoint, pipeline, queueName); } /** * Sets the endpoint for the Azure Storage Queue instance that the client will interact with. 
* * <p>The first path segment, if the endpoint contains path segments, will be assumed to be the name of the queue * that the client will interact with.</p> * * <p>Query parameters of the endpoint will be parsed using {@link SASTokenCredential * attempt to generate a {@link SASTokenCredential} to authenticate requests sent to the service.</p> * * @param endpoint The URL of the Azure Storage Queue instance to send service requests to and receive responses from. * @return the updated QueueClientBuilder object * @throws IllegalArgumentException If {@code endpoint} isn't a proper URL */ public QueueClientBuilder endpoint(String endpoint) { Objects.requireNonNull(endpoint); try { URL fullURL = new URL(endpoint); this.endpoint = new URL(fullURL.getProtocol() + ": String[] pathSegments = fullURL.getPath().split("/", 2); if (pathSegments.length == 2 && !ImplUtils.isNullOrEmpty(pathSegments[1])) { this.queueName = pathSegments[1]; } SASTokenCredential credential = SASTokenCredential.fromQuery(fullURL.getQuery()); if (credential != null) { this.sasTokenCredential = credential; } } catch (MalformedURLException ex) { LOGGER.asError().log("The Azure Storage Queue endpoint url is malformed. Endpoint: " + endpoint); throw new IllegalArgumentException("The Azure Storage Queue endpoint url is malformed. Endpoint: " + endpoint); } return this; } /** * Sets the name of the queue that the client will interact with. * * @param queueName Name of the queue * @return the updated QueueClientBuilder object * @throws NullPointerException If {@code queueName} is {@code null}. */ public QueueClientBuilder queueName(String queueName) { this.queueName = Objects.requireNonNull(queueName); return this; } /** * Sets the {@link SASTokenCredential} used to authenticate requests sent to the Queue. 
* * @param credential SAS token credential generated from the Storage account that authorizes requests * @return the updated QueueClientBuilder object * @throws NullPointerException If {@code credential} is {@code null}. */ public QueueClientBuilder credential(SASTokenCredential credential) { this.sasTokenCredential = Objects.requireNonNull(credential); return this; } /** * Sets the {@link SharedKeyCredential} used to authenticate requests sent to the Queue. * * @param credential Shared key credential can retrieve from the Storage account that authorizes requests * @return the updated QueueServiceClientBuilder object * @throws NullPointerException If {@code credential} is {@code null}. */ public QueueClientBuilder credential(SharedKeyCredential credential) { this.sharedKeyCredential = Objects.requireNonNull(credential); return this; } /** * Creates a {@link SharedKeyCredential} from the {@code connectionString} used to authenticate requests sent to the * Queue service. * * @param connectionString Connection string from the Access Keys section in the Storage account * @return the updated QueueClientBuilder object * @throws NullPointerException If {@code connectionString} is {@code null}. */ public QueueClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); this.sharedKeyCredential = SharedKeyCredential.fromConnectionString(connectionString); getEndPointFromConnectionString(connectionString); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param httpClient The HTTP client to use for requests. * @return The updated QueueClientBuilder object. * @throws NullPointerException If {@code httpClient} is {@code null}. */ public QueueClientBuilder httpClient(HttpClient httpClient) { this.httpClient = Objects.requireNonNull(httpClient); return this; } /** * Adds a policy to the set of existing policies that are executed after the {@link RetryPolicy}. 
* * @param pipelinePolicy The retry policy for service requests. * @return The updated QueueClientBuilder object. * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public QueueClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy); this.policies.add(pipelinePolicy); return this; } /** * Sets the logging level for HTTP requests and responses. * * @param logLevel The amount of logging output when sending and receiving HTTP requests/responses. * @return The updated QueueClientBuilder object. */ public QueueClientBuilder httpLogDetailLevel(HttpLogDetailLevel logLevel) { this.logLevel = logLevel; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from {@link QueueClientBuilder * and {@link QueueClientBuilder * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated QueueClientBuilder object. * @throws NullPointerException If {@code pipeline} is {@code null}. */ public QueueClientBuilder pipeline(HttpPipeline pipeline) { Objects.requireNonNull(pipeline); this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link ConfigurationManager * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated QueueClientBuilder object. */ public QueueClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } }
Blobs has a postResponseProcess helper method to convert the StorageErrorException into a StorageException; we should standardize this across the Storage libraries.
/**
 * Creates a queue with the given name and metadata, returning a response that carries a
 * {@link QueueAsyncClient} pointed at the newly created queue.
 *
 * @param queueName Name of the queue to create.
 * @param metadata Metadata to associate with the queue; may be {@code null}.
 * @return A response containing the QueueAsyncClient and the status of creating the queue.
 */
public Mono<Response<QueueAsyncClient>> createQueue(String queueName, Map<String, String> metadata) {
    // Build the per-queue client first so the same instance can be both the creator and the
    // value surfaced in the mapped response.
    final QueueAsyncClient createdClient = new QueueAsyncClient(client, queueName);
    return createdClient.create(metadata)
        .map(creationResponse -> new SimpleResponse<>(creationResponse, createdClient));
}
.map(response -> new SimpleResponse<>(response, queueAsyncClient));
new QueueAsyncClient(client, queueName); } /** * Creates a queue in the storage account with the specified name and returns a QueueAsyncClient to interact * with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the queue "test"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.createQueue
class QueueServiceAsyncClient { private final AzureQueueStorageImpl client; /** * Creates a QueueServiceAsyncClient that sends requests to the storage account at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through */ QueueServiceAsyncClient(URL endpoint, HttpPipeline httpPipeline) { this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue service is using a malformed URL. */ public URL getQueueServiceUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { throw new RuntimeException("Storage account URL is malformed"); } } /** * Constructs a QueueAsyncClient that interacts with the specified queue. * * This will not create the queue in the storage account if it doesn't exist. * * @param queueName Name of the queue * @return QueueAsyncClient that interacts with the specified queue */ public QueueAsyncClient getQueueAsyncClient(String queueName) { return * * @param queueName Name of the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName) { return createQueue(queueName, null); } /** * Creates a queue in the storage account with the specified name and metadata and returns a QueueAsyncClient to * interact with it. 
* * <p><strong>Code Samples</strong></p> * * <p>Create the queue "test" with metadata "queue:metadata"</p> * * <pre> * client.createQueue("test", Collections.singletonMap("queue", "metadata")) * .subscribe(response -&gt; System.out.printf("Creating the queue completed with status code %d", response.statusCode())); * </pre> * * @param queueName Name of the queue * @param metadata Metadata to associate with the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName, Map<String, String> metadata) { QueueAsyncClient queueAsyncClient = new QueueAsyncClient(client, queueName); return queueAsyncClient.create(metadata) .map(response -> new SimpleResponse<>(response, queueAsyncClient)); } /** * Deletes a queue in the storage account * * <p><strong>Code Samples</strong></p> * * <p>Delete the queue "test"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.deleteQueue * * @param queueName Name of the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteQueue(String queueName) { return new QueueAsyncClient(client, queueName).delete(); } /** * Lists all queues in the storage account without their metadata. * * <p><strong>Code Samples</strong></p> * * <p>List all queues in the account</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.listQueues} * * @return {@link QueueItem Queues} in the storage account */ public Flux<QueueItem> listQueues() { return listQueues(null, null); } /** * Lists the queues in the storage account that pass the filter. * * Pass true to {@link QueuesSegmentOptions * the queues. 
* * <p><strong>Code Samples</strong></p> * * <p>List all queues and their metadata in the account</p> * * <pre> * client.listQueues(new QueuesSegmentOptions().includeMetadata(true)) * .subscribe(result -&gt; System.out.printf("Queue %s exists in the account and has metadata %s", result.name(), result.metadata())); * </pre> * * <p>List all queues that begin with "azure"</p> * * <pre> * client.listQueues(new QueuesSegmentOptions().prefix("azure")) * .subscribe(result -&gt; System.out.printf("Queue %s exists in the account", result.name())); * </pre> * * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ public Flux<QueueItem> listQueues(QueuesSegmentOptions options) { return listQueues(null, options); } /** * Lists the queues in the storage account that pass the filter starting at the specified marker. * * Pass true to {@link QueuesSegmentOptions * the queues. * * @param marker Starting point to list the queues * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ Flux<QueueItem> listQueues(String marker, QueuesSegmentOptions options) { String prefix = null; Integer maxResults = null; final List<ListQueuesIncludeType> include = new ArrayList<>(); if (options != null) { prefix = options.prefix(); maxResults = options.maxResults(); if (options.includeMetadata()) { include.add(ListQueuesIncludeType.fromString(ListQueuesIncludeType.METADATA.toString())); } } Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(prefix, marker, maxResults, include, null, null, Context.NONE); return result.flatMapMany(response -> extractAndFetchQueues(response, include, Context.NONE)); } /* * Helper function used to auto-enumerate through paged responses */ private Flux<QueueItem> listQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> 
include, Context context) { ListQueuesSegmentResponse value = response.value(); Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(value.prefix(), value.marker(), value.maxResults(), include, null, null, context); return result.flatMapMany(r -> extractAndFetchQueues(r, include, context)); } /* * Helper function used to auto-enumerate though paged responses */ private Publisher<QueueItem> extractAndFetchQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> include, Context context) { String nextPageLink = response.value().nextMarker(); if (nextPageLink == null) { return Flux.fromIterable(response.value().queueItems()); } return Flux.fromIterable(response.value().queueItems()).concatWith(listQueues(response, include, context)); } /** * Retrieves the properties of the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). * * <p><strong>Code Samples</strong></p> * * <p>Retrieve Queue service properties</p> * * <pre> * client.getProperties() * .subscribe(response -&gt; { * StorageServiceProperties properties = response.value(); * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b", properties.hourMetrics().enabled(), properties.minuteMetrics().enabled()); * }); * </pre> * * @return Storage account Queue service properties */ public Mono<Response<StorageServiceProperties>> getProperties() { return client.services().getPropertiesWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } /** * Sets the properties for the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). 
* * To maintain the CORS in the Queue service pass a {@code null} value for {@link StorageServiceProperties * To disable all CORS in the Queue service pass an empty list for {@link StorageServiceProperties * * <p><strong>Code Sample</strong></p> * * <p>Clear CORS in the Queue service</p> * * <pre> * StorageServiceProperties properties = client.getProperties().block().value(); * properties.cors(Collections.emptyList()); * * client.setProperties(properties) * .subscribe(response -&gt; System.out.printf("Setting Queue service properties completed with status code %d", response.statusCode())); * </pre> * * <p>Enable Minute and Hour Metrics</p> * * <pre> * StorageServiceProperties properties = client.getProperties().block().value(); * properties.minuteMetrics().enabled(true); * properties.hourMetrics().enabled(true); * * client.setProperties(properties) * .subscribe(response -&gt; System.out.printf("Setting Queue service properties completed with status code %d", response.statusCode())); * </pre> * * @param properties Storage account Queue service properties * @return A response that only contains headers and response status code * @throws StorageErrorException When one of the following is true * <ul> * <li>A CORS rule is missing one of its fields</li> * <li>More than five CORS rules will exist for the Queue service</li> * <li>Size of all CORS rules exceeds 2KB</li> * <li> * Length of {@link CorsRule * or {@link CorsRule * </li> * <li>{@link CorsRule * </ul> */ public Mono<VoidResponse> setProperties(StorageServiceProperties properties) { return client.services().setPropertiesWithRestResponseAsync(properties, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the geo replication information about the Queue service. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve the geo replication information</p> * * <pre> * client.getStatistics() * .subscribe(response -&gt; { * StorageServiceStats stats = response.value(); * System.out.printf("Geo replication status: %s, Last synced: %s", stats.geoReplication.status(), stats.geoReplication().lastSyncTime()); * }); * </pre> * * @return The geo replication information about the Queue service */ public Mono<Response<StorageServiceStats>> getStatistics() { return client.services().getStatisticsWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } }
class QueueServiceAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueServiceAsyncClient.class); private final AzureQueueStorageImpl client; /** * Creates a QueueServiceAsyncClient that sends requests to the storage account at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through */ QueueServiceAsyncClient(URL endpoint, HttpPipeline httpPipeline) { this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue service is using a malformed URL. */ public URL getQueueServiceUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { LOGGER.asError().log("Queue Service URL is malformed"); throw new RuntimeException("Storage account URL is malformed"); } } /** * Constructs a QueueAsyncClient that interacts with the specified queue. * * This will not create the queue in the storage account if it doesn't exist. * * @param queueName Name of the queue * @return QueueAsyncClient that interacts with the specified queue */ public QueueAsyncClient getQueueAsyncClient(String queueName) { return * * @param queueName Name of the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName) { return createQueue(queueName, null); } /** * Creates a queue in the storage account with the specified name and metadata and returns a QueueAsyncClient to * interact with it. 
* * <p><strong>Code Samples</strong></p> * * <p>Create the queue "test" with metadata "queue:metadata"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.createQueue * * @param queueName Name of the queue * @param metadata Metadata to associate with the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName, Map<String, String> metadata) { QueueAsyncClient queueAsyncClient = new QueueAsyncClient(client, queueName); return queueAsyncClient.create(metadata) .map(response -> new SimpleResponse<>(response, queueAsyncClient)); } /** * Deletes a queue in the storage account * * <p><strong>Code Samples</strong></p> * * <p>Delete the queue "test"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.deleteQueue * * @param queueName Name of the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteQueue(String queueName) { return new QueueAsyncClient(client, queueName).delete(); } /** * Lists all queues in the storage account without their metadata. * * <p><strong>Code Samples</strong></p> * * <p>List all queues in the account</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.listQueues} * * @return {@link QueueItem Queues} in the storage account */ public Flux<QueueItem> listQueues() { return listQueues(null, null); } /** * Lists the queues in the storage account that pass the filter. * * Pass true to {@link QueuesSegmentOptions * the queues. 
* * <p><strong>Code Samples</strong></p> * * <p>List all queues that begin with "azure"</p> * * {@codesnippet com.azure.storage.queue.queueServiceClient.listQueues * * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ public Flux<QueueItem> listQueues(QueuesSegmentOptions options) { return listQueues(null, options); } /** * Lists the queues in the storage account that pass the filter starting at the specified marker. * * Pass true to {@link QueuesSegmentOptions * the queues. * * @param marker Starting point to list the queues * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ Flux<QueueItem> listQueues(String marker, QueuesSegmentOptions options) { String prefix = null; Integer maxResults = null; final List<ListQueuesIncludeType> include = new ArrayList<>(); if (options != null) { prefix = options.prefix(); maxResults = options.maxResults(); if (options.includeMetadata()) { include.add(ListQueuesIncludeType.fromString(ListQueuesIncludeType.METADATA.toString())); } } Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(prefix, marker, maxResults, include, null, null, Context.NONE); return result.flatMapMany(response -> extractAndFetchQueues(response, include, Context.NONE)); } /* * Helper function used to auto-enumerate through paged responses */ private Flux<QueueItem> listQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> include, Context context) { ListQueuesSegmentResponse value = response.value(); Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(value.prefix(), value.marker(), value.maxResults(), include, null, null, context); return result.flatMapMany(r -> extractAndFetchQueues(r, include, context)); } /* * Helper function used to auto-enumerate 
though paged responses */ private Flux<QueueItem> extractAndFetchQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> include, Context context) { String nextPageLink = response.value().nextMarker(); if (nextPageLink == null) { return Flux.fromIterable(response.value().queueItems()); } return Flux.fromIterable(response.value().queueItems()).concatWith(listQueues(response, include, context)); } /** * Retrieves the properties of the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). * * <p><strong>Code Samples</strong></p> * * <p>Retrieve Queue service properties</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getProperties} * * @return Storage account Queue service properties */ public Mono<Response<StorageServiceProperties>> getProperties() { return client.services().getPropertiesWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } /** * Sets the properties for the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). 
* * To maintain the CORS in the Queue service pass a {@code null} value for {@link StorageServiceProperties * To disable all CORS in the Queue service pass an empty list for {@link StorageServiceProperties * * <p><strong>Code Sample</strong></p> * * <p>Clear CORS in the Queue service</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setProperties * * <p>Enable Minute and Hour Metrics</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setPropertiesEnableMetrics * * @param properties Storage account Queue service properties * @return A response that only contains headers and response status code * @throws StorageErrorException When one of the following is true * <ul> * <li>A CORS rule is missing one of its fields</li> * <li>More than five CORS rules will exist for the Queue service</li> * <li>Size of all CORS rules exceeds 2KB</li> * <li> * Length of {@link CorsRule * or {@link CorsRule * </li> * <li>{@link CorsRule * </ul> */ public Mono<VoidResponse> setProperties(StorageServiceProperties properties) { return client.services().setPropertiesWithRestResponseAsync(properties, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the geo replication information about the Queue service. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the geo replication information</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getStatistics} * * @return The geo replication information about the Queue service */ public Mono<Response<StorageServiceStats>> getStatistics() { return client.services().getStatisticsWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } }
This doesn't seem to be the correct way to chain Reactor requests; I think `then()` should be used.
public static void main(String[] args) { String queueURL = String.format("https: QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueURL).buildAsyncClient(); queueAsyncClient.create() .flatMap(response -> queueAsyncClient.enqueueMessage("This is message 1")) .flatMap(response -> queueAsyncClient.enqueueMessage("This is message 2")) .subscribe( response -> { System.out.println("Message successfully equeueed by queueAsyncClient. Message id:" + response.value().messageId()); }, err -> { System.out.println("Error thrown when enqueue the message. Error message: " + err.getMessage()); }, () -> { System.out.println("The enqueue has been completed."); } ); }
.flatMap(response -> queueAsyncClient.enqueueMessage("This is message 1"))
public static void main(String[] args) { String queueURL = String.format("https: QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueURL).buildAsyncClient(); queueAsyncClient.create() .doOnSuccess(response -> queueAsyncClient.enqueueMessage("This is message 1")) .then(queueAsyncClient.enqueueMessage("This is message 2")) .subscribe( response -> { System.out.println("Message successfully equeueed by queueAsyncClient. Message id:" + response.value().messageId()); }, err -> { System.out.println("Error thrown when enqueue the message. Error message: " + err.getMessage()); }, () -> { System.out.println("The enqueue has been completed."); } ); }
/*
 * Sample showing the basic operations of enqueueing and dequeueing messages with the async
 * queue client. Account credentials come from environment variables.
 */
class AsyncSamples {
    private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME");
    private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN");
    private static final String QUEUE_NAME = generateRandomName("async-call", 16);

    /**
     * The main method shows how we do the basic operations of enqueueing and dequeueing messages on async queue client.
     * @param args No args needed for main method.
     */

    /*
     * Builds a unique name by appending a random UUID-derived suffix to {@code prefix}, so the
     * total length is at most {@code length} (just the prefix when it already meets the length).
     */
    private static String generateRandomName(String prefix, int length) {
        int suffixLength = length > prefix.length() ? length - prefix.length() : 0;
        String randomSuffix = UUID.randomUUID().toString().substring(0, suffixLength);
        return prefix + randomSuffix;
    }
}
class AsyncSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); private static final String QUEUE_NAME = generateRandomName("async-call", 16); /** * The main method shows how we do the basic operations of enqueueing and dequeueing messages on async queue client. * @param args No args needed for main method. */ }
Makes sense. I did not turn the Mono into a Flux, and the next operation did not depend on the response from the async request.
public static void main(String[] args) { String queueURL = String.format("https: QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueURL).buildAsyncClient(); queueAsyncClient.create() .flatMap(response -> queueAsyncClient.enqueueMessage("This is message 1")) .flatMap(response -> queueAsyncClient.enqueueMessage("This is message 2")) .subscribe( response -> { System.out.println("Message successfully equeueed by queueAsyncClient. Message id:" + response.value().messageId()); }, err -> { System.out.println("Error thrown when enqueue the message. Error message: " + err.getMessage()); }, () -> { System.out.println("The enqueue has been completed."); } ); }
.flatMap(response -> queueAsyncClient.enqueueMessage("This is message 1"))
public static void main(String[] args) { String queueURL = String.format("https: QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueURL).buildAsyncClient(); queueAsyncClient.create() .doOnSuccess(response -> queueAsyncClient.enqueueMessage("This is message 1")) .then(queueAsyncClient.enqueueMessage("This is message 2")) .subscribe( response -> { System.out.println("Message successfully equeueed by queueAsyncClient. Message id:" + response.value().messageId()); }, err -> { System.out.println("Error thrown when enqueue the message. Error message: " + err.getMessage()); }, () -> { System.out.println("The enqueue has been completed."); } ); }
class AsyncSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); private static final String QUEUE_NAME = generateRandomName("async-call", 16); /** * The main method shows how we do the basic operations of enqueueing and dequeueing messages on async queue client. * @param args No args needed for main method. */ private static String generateRandomName(String prefix, int length) { int len = length > prefix.length() ? length - prefix.length() : 0; return prefix + UUID.randomUUID().toString().substring(0, len); } }
class AsyncSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); private static final String QUEUE_NAME = generateRandomName("async-call", 16); /** * The main method shows how we do the basic operations of enqueueing and dequeueing messages on async queue client. * @param args No args needed for main method. */ }
Will change the first one to doOnSuccess, and second one change to then.
public static void main(String[] args) { String queueURL = String.format("https: QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueURL).buildAsyncClient(); queueAsyncClient.create() .flatMap(response -> queueAsyncClient.enqueueMessage("This is message 1")) .flatMap(response -> queueAsyncClient.enqueueMessage("This is message 2")) .subscribe( response -> { System.out.println("Message successfully equeueed by queueAsyncClient. Message id:" + response.value().messageId()); }, err -> { System.out.println("Error thrown when enqueue the message. Error message: " + err.getMessage()); }, () -> { System.out.println("The enqueue has been completed."); } ); }
.flatMap(response -> queueAsyncClient.enqueueMessage("This is message 1"))
public static void main(String[] args) { String queueURL = String.format("https: QueueAsyncClient queueAsyncClient = new QueueClientBuilder().endpoint(queueURL).buildAsyncClient(); queueAsyncClient.create() .doOnSuccess(response -> queueAsyncClient.enqueueMessage("This is message 1")) .then(queueAsyncClient.enqueueMessage("This is message 2")) .subscribe( response -> { System.out.println("Message successfully equeueed by queueAsyncClient. Message id:" + response.value().messageId()); }, err -> { System.out.println("Error thrown when enqueue the message. Error message: " + err.getMessage()); }, () -> { System.out.println("The enqueue has been completed."); } ); }
class AsyncSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); private static final String QUEUE_NAME = generateRandomName("async-call", 16); /** * The main method shows how we do the basic operations of enqueueing and dequeueing messages on async queue client. * @param args No args needed for main method. */ private static String generateRandomName(String prefix, int length) { int len = length > prefix.length() ? length - prefix.length() : 0; return prefix + UUID.randomUUID().toString().substring(0, len); } }
class AsyncSamples { private static final String ACCOUNT_NAME = System.getenv("AZURE_STORAGE_ACCOUNT_NAME"); private static final String SAS_TOKEN = System.getenv("PRIMARY_SAS_TOKEN"); private static final String QUEUE_NAME = generateRandomName("async-call", 16); /** * The main method shows how we do the basic operations of enqueueing and dequeueing messages on async queue client. * @param args No args needed for main method. */ }
Good call out. I can add a feature issue in epic. I's wish we push the basic one to feature branch first. Then address these things.
public Mono<Response<QueueAsyncClient>> createQueue(String queueName, Map<String, String> metadata) { QueueAsyncClient queueAsyncClient = new QueueAsyncClient(client, queueName); return queueAsyncClient.create(metadata) .map(response -> new SimpleResponse<>(response, queueAsyncClient)); }
.map(response -> new SimpleResponse<>(response, queueAsyncClient));
new QueueAsyncClient(client, queueName); } /** * Creates a queue in the storage account with the specified name and returns a QueueAsyncClient to interact * with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the queue "test"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.createQueue
class QueueServiceAsyncClient { private final AzureQueueStorageImpl client; /** * Creates a QueueServiceAsyncClient that sends requests to the storage account at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through */ QueueServiceAsyncClient(URL endpoint, HttpPipeline httpPipeline) { this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue service is using a malformed URL. */ public URL getQueueServiceUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { throw new RuntimeException("Storage account URL is malformed"); } } /** * Constructs a QueueAsyncClient that interacts with the specified queue. * * This will not create the queue in the storage account if it doesn't exist. * * @param queueName Name of the queue * @return QueueAsyncClient that interacts with the specified queue */ public QueueAsyncClient getQueueAsyncClient(String queueName) { return * * @param queueName Name of the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName) { return createQueue(queueName, null); } /** * Creates a queue in the storage account with the specified name and metadata and returns a QueueAsyncClient to * interact with it. 
* * <p><strong>Code Samples</strong></p> * * <p>Create the queue "test" with metadata "queue:metadata"</p> * * <pre> * client.createQueue("test", Collections.singletonMap("queue", "metadata")) * .subscribe(response -&gt; System.out.printf("Creating the queue completed with status code %d", response.statusCode())); * </pre> * * @param queueName Name of the queue * @param metadata Metadata to associate with the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName, Map<String, String> metadata) { QueueAsyncClient queueAsyncClient = new QueueAsyncClient(client, queueName); return queueAsyncClient.create(metadata) .map(response -> new SimpleResponse<>(response, queueAsyncClient)); } /** * Deletes a queue in the storage account * * <p><strong>Code Samples</strong></p> * * <p>Delete the queue "test"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.deleteQueue * * @param queueName Name of the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteQueue(String queueName) { return new QueueAsyncClient(client, queueName).delete(); } /** * Lists all queues in the storage account without their metadata. * * <p><strong>Code Samples</strong></p> * * <p>List all queues in the account</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.listQueues} * * @return {@link QueueItem Queues} in the storage account */ public Flux<QueueItem> listQueues() { return listQueues(null, null); } /** * Lists the queues in the storage account that pass the filter. * * Pass true to {@link QueuesSegmentOptions * the queues. 
* * <p><strong>Code Samples</strong></p> * * <p>List all queues and their metadata in the account</p> * * <pre> * client.listQueues(new QueuesSegmentOptions().includeMetadata(true)) * .subscribe(result -&gt; System.out.printf("Queue %s exists in the account and has metadata %s", result.name(), result.metadata())); * </pre> * * <p>List all queues that begin with "azure"</p> * * <pre> * client.listQueues(new QueuesSegmentOptions().prefix("azure")) * .subscribe(result -&gt; System.out.printf("Queue %s exists in the account", result.name())); * </pre> * * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ public Flux<QueueItem> listQueues(QueuesSegmentOptions options) { return listQueues(null, options); } /** * Lists the queues in the storage account that pass the filter starting at the specified marker. * * Pass true to {@link QueuesSegmentOptions * the queues. * * @param marker Starting point to list the queues * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ Flux<QueueItem> listQueues(String marker, QueuesSegmentOptions options) { String prefix = null; Integer maxResults = null; final List<ListQueuesIncludeType> include = new ArrayList<>(); if (options != null) { prefix = options.prefix(); maxResults = options.maxResults(); if (options.includeMetadata()) { include.add(ListQueuesIncludeType.fromString(ListQueuesIncludeType.METADATA.toString())); } } Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(prefix, marker, maxResults, include, null, null, Context.NONE); return result.flatMapMany(response -> extractAndFetchQueues(response, include, Context.NONE)); } /* * Helper function used to auto-enumerate through paged responses */ private Flux<QueueItem> listQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> 
include, Context context) { ListQueuesSegmentResponse value = response.value(); Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(value.prefix(), value.marker(), value.maxResults(), include, null, null, context); return result.flatMapMany(r -> extractAndFetchQueues(r, include, context)); } /* * Helper function used to auto-enumerate though paged responses */ private Publisher<QueueItem> extractAndFetchQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> include, Context context) { String nextPageLink = response.value().nextMarker(); if (nextPageLink == null) { return Flux.fromIterable(response.value().queueItems()); } return Flux.fromIterable(response.value().queueItems()).concatWith(listQueues(response, include, context)); } /** * Retrieves the properties of the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). * * <p><strong>Code Samples</strong></p> * * <p>Retrieve Queue service properties</p> * * <pre> * client.getProperties() * .subscribe(response -&gt; { * StorageServiceProperties properties = response.value(); * System.out.printf("Hour metrics enabled: %b, Minute metrics enabled: %b", properties.hourMetrics().enabled(), properties.minuteMetrics().enabled()); * }); * </pre> * * @return Storage account Queue service properties */ public Mono<Response<StorageServiceProperties>> getProperties() { return client.services().getPropertiesWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } /** * Sets the properties for the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). 
* * To maintain the CORS in the Queue service pass a {@code null} value for {@link StorageServiceProperties * To disable all CORS in the Queue service pass an empty list for {@link StorageServiceProperties * * <p><strong>Code Sample</strong></p> * * <p>Clear CORS in the Queue service</p> * * <pre> * StorageServiceProperties properties = client.getProperties().block().value(); * properties.cors(Collections.emptyList()); * * client.setProperties(properties) * .subscribe(response -&gt; System.out.printf("Setting Queue service properties completed with status code %d", response.statusCode())); * </pre> * * <p>Enable Minute and Hour Metrics</p> * * <pre> * StorageServiceProperties properties = client.getProperties().block().value(); * properties.minuteMetrics().enabled(true); * properties.hourMetrics().enabled(true); * * client.setProperties(properties) * .subscribe(response -&gt; System.out.printf("Setting Queue service properties completed with status code %d", response.statusCode())); * </pre> * * @param properties Storage account Queue service properties * @return A response that only contains headers and response status code * @throws StorageErrorException When one of the following is true * <ul> * <li>A CORS rule is missing one of its fields</li> * <li>More than five CORS rules will exist for the Queue service</li> * <li>Size of all CORS rules exceeds 2KB</li> * <li> * Length of {@link CorsRule * or {@link CorsRule * </li> * <li>{@link CorsRule * </ul> */ public Mono<VoidResponse> setProperties(StorageServiceProperties properties) { return client.services().setPropertiesWithRestResponseAsync(properties, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the geo replication information about the Queue service. 
* * <p><strong>Code Samples</strong></p> * * <p>Retrieve the geo replication information</p> * * <pre> * client.getStatistics() * .subscribe(response -&gt; { * StorageServiceStats stats = response.value(); * System.out.printf("Geo replication status: %s, Last synced: %s", stats.geoReplication.status(), stats.geoReplication().lastSyncTime()); * }); * </pre> * * @return The geo replication information about the Queue service */ public Mono<Response<StorageServiceStats>> getStatistics() { return client.services().getStatisticsWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } }
class QueueServiceAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueServiceAsyncClient.class); private final AzureQueueStorageImpl client; /** * Creates a QueueServiceAsyncClient that sends requests to the storage account at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through */ QueueServiceAsyncClient(URL endpoint, HttpPipeline httpPipeline) { this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue service is using a malformed URL. */ public URL getQueueServiceUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { LOGGER.asError().log("Queue Service URL is malformed"); throw new RuntimeException("Storage account URL is malformed"); } } /** * Constructs a QueueAsyncClient that interacts with the specified queue. * * This will not create the queue in the storage account if it doesn't exist. * * @param queueName Name of the queue * @return QueueAsyncClient that interacts with the specified queue */ public QueueAsyncClient getQueueAsyncClient(String queueName) { return * * @param queueName Name of the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName) { return createQueue(queueName, null); } /** * Creates a queue in the storage account with the specified name and metadata and returns a QueueAsyncClient to * interact with it. 
* * <p><strong>Code Samples</strong></p> * * <p>Create the queue "test" with metadata "queue:metadata"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.createQueue * * @param queueName Name of the queue * @param metadata Metadata to associate with the queue * @return A response containing the QueueAsyncClient and the status of creating the queue * @throws StorageErrorException If a queue with the same name and different metadata already exists */ public Mono<Response<QueueAsyncClient>> createQueue(String queueName, Map<String, String> metadata) { QueueAsyncClient queueAsyncClient = new QueueAsyncClient(client, queueName); return queueAsyncClient.create(metadata) .map(response -> new SimpleResponse<>(response, queueAsyncClient)); } /** * Deletes a queue in the storage account * * <p><strong>Code Samples</strong></p> * * <p>Delete the queue "test"</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.deleteQueue * * @param queueName Name of the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> deleteQueue(String queueName) { return new QueueAsyncClient(client, queueName).delete(); } /** * Lists all queues in the storage account without their metadata. * * <p><strong>Code Samples</strong></p> * * <p>List all queues in the account</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.listQueues} * * @return {@link QueueItem Queues} in the storage account */ public Flux<QueueItem> listQueues() { return listQueues(null, null); } /** * Lists the queues in the storage account that pass the filter. * * Pass true to {@link QueuesSegmentOptions * the queues. 
* * <p><strong>Code Samples</strong></p> * * <p>List all queues that begin with "azure"</p> * * {@codesnippet com.azure.storage.queue.queueServiceClient.listQueues * * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ public Flux<QueueItem> listQueues(QueuesSegmentOptions options) { return listQueues(null, options); } /** * Lists the queues in the storage account that pass the filter starting at the specified marker. * * Pass true to {@link QueuesSegmentOptions * the queues. * * @param marker Starting point to list the queues * @param options Options for listing queues * @return {@link QueueItem Queues} in the storage account that satisfy the filter requirements */ Flux<QueueItem> listQueues(String marker, QueuesSegmentOptions options) { String prefix = null; Integer maxResults = null; final List<ListQueuesIncludeType> include = new ArrayList<>(); if (options != null) { prefix = options.prefix(); maxResults = options.maxResults(); if (options.includeMetadata()) { include.add(ListQueuesIncludeType.fromString(ListQueuesIncludeType.METADATA.toString())); } } Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(prefix, marker, maxResults, include, null, null, Context.NONE); return result.flatMapMany(response -> extractAndFetchQueues(response, include, Context.NONE)); } /* * Helper function used to auto-enumerate through paged responses */ private Flux<QueueItem> listQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> include, Context context) { ListQueuesSegmentResponse value = response.value(); Mono<ServicesListQueuesSegmentResponse> result = client.services() .listQueuesSegmentWithRestResponseAsync(value.prefix(), value.marker(), value.maxResults(), include, null, null, context); return result.flatMapMany(r -> extractAndFetchQueues(r, include, context)); } /* * Helper function used to auto-enumerate 
though paged responses */ private Flux<QueueItem> extractAndFetchQueues(ServicesListQueuesSegmentResponse response, List<ListQueuesIncludeType> include, Context context) { String nextPageLink = response.value().nextMarker(); if (nextPageLink == null) { return Flux.fromIterable(response.value().queueItems()); } return Flux.fromIterable(response.value().queueItems()).concatWith(listQueues(response, include, context)); } /** * Retrieves the properties of the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). * * <p><strong>Code Samples</strong></p> * * <p>Retrieve Queue service properties</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getProperties} * * @return Storage account Queue service properties */ public Mono<Response<StorageServiceProperties>> getProperties() { return client.services().getPropertiesWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } /** * Sets the properties for the storage account's Queue service. The properties range from storage analytics and * metric to CORS (Cross-Origin Resource Sharing). 
* * To maintain the CORS in the Queue service pass a {@code null} value for {@link StorageServiceProperties * To disable all CORS in the Queue service pass an empty list for {@link StorageServiceProperties * * <p><strong>Code Sample</strong></p> * * <p>Clear CORS in the Queue service</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setProperties * * <p>Enable Minute and Hour Metrics</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.setPropertiesEnableMetrics * * @param properties Storage account Queue service properties * @return A response that only contains headers and response status code * @throws StorageErrorException When one of the following is true * <ul> * <li>A CORS rule is missing one of its fields</li> * <li>More than five CORS rules will exist for the Queue service</li> * <li>Size of all CORS rules exceeds 2KB</li> * <li> * Length of {@link CorsRule * or {@link CorsRule * </li> * <li>{@link CorsRule * </ul> */ public Mono<VoidResponse> setProperties(StorageServiceProperties properties) { return client.services().setPropertiesWithRestResponseAsync(properties, Context.NONE) .map(VoidResponse::new); } /** * Retrieves the geo replication information about the Queue service. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve the geo replication information</p> * * {@codesnippet com.azure.storage.queue.queueServiceAsyncClient.getStatistics} * * @return The geo replication information about the Queue service */ public Mono<Response<StorageServiceStats>> getStatistics() { return client.services().getStatisticsWithRestResponseAsync(Context.NONE) .map(response -> new SimpleResponse<>(response, response.value())); } }
ClientLogger.logAndThrow all exceptions. Please search for all cases because soon Shawn will check in his CheckStyle rule and it will break the build.
public URL getQueueUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { throw new RuntimeException("Queue URL is malformed"); } }
throw new RuntimeException("Queue URL is malformed");
public URL getQueueUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { LOGGER.asError().log("Queue URL is malformed"); throw new RuntimeException("Queue URL is malformed"); } }
class QueueAsyncClient { private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@link AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline()) .url(client.url()) .version(client.version()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<VoidResponse> create() { return create(null); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> create(Map<String, String> metadata) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> delete() { return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getProperties() { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadata(Map<String, String> metadata) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessages() { return client.messages().clearWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) { return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7)); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. 
If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * <pre> * client.peekMessages(5) * .subscribe(result -&gt; System.out.printf("Peeked message %s has been dequeued %d times", result.messageId(), result.dequeueCount())); * </pre> * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. 
* @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private 
Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
class QueueAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline()) .url(client.url()) .version(client.version()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<VoidResponse> create() { return create(null); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> create(Map<String, String> metadata) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> delete() { return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getProperties() { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadata(Map<String, String> metadata) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessages() { return client.messages().clearWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) { return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7)); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. 
If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. 
* @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private 
Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
Do you mean I added a log message before throw statement. Done.
public URL getQueueUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { throw new RuntimeException("Queue URL is malformed"); } }
throw new RuntimeException("Queue URL is malformed");
public URL getQueueUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { LOGGER.asError().log("Queue URL is malformed"); throw new RuntimeException("Queue URL is malformed"); } }
class QueueAsyncClient { private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@link AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline()) .url(client.url()) .version(client.version()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<VoidResponse> create() { return create(null); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> create(Map<String, String> metadata) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> delete() { return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getProperties() { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadata(Map<String, String> metadata) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessages() { return client.messages().clearWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) { return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7)); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. 
If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * <pre> * client.peekMessages(5) * .subscribe(result -&gt; System.out.printf("Peeked message %s has been dequeued %d times", result.messageId(), result.dequeueCount())); * </pre> * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. 
* @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private 
Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
class QueueAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline()) .url(client.url()) .version(client.version()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<VoidResponse> create() { return create(null); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> create(Map<String, String> metadata) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> delete() { return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getProperties() { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadata(Map<String, String> metadata) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessages() { return client.messages().clearWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) { return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7)); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. 
If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. 
* @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private 
Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
No. Just call `Logger.logAndThrow(new RuntimeException("....")` and then `return null` afterwards.
public URL getQueueUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { throw new RuntimeException("Queue URL is malformed"); } }
throw new RuntimeException("Queue URL is malformed");
public URL getQueueUrl() { try { return new URL(client.url()); } catch (MalformedURLException ex) { LOGGER.asError().log("Queue URL is malformed"); throw new RuntimeException("Queue URL is malformed"); } }
class QueueAsyncClient { private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@link AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline()) .url(client.url()) .version(client.version()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<VoidResponse> create() { return create(null); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> create(Map<String, String> metadata) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> delete() { return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getProperties() { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadata(Map<String, String> metadata) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessages() { return client.messages().clearWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) { return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7)); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. 
If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * <pre> * client.peekMessages(5) * .subscribe(result -&gt; System.out.printf("Peeked message %s has been dequeued %d times", result.messageId(), result.dequeueCount())); * </pre> * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. 
* @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private 
Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
class QueueAsyncClient { private static final ClientLogger LOGGER = new ClientLogger(QueueAsyncClient.class); private final AzureQueueStorageImpl client; private final String queueName; /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code AzureQueueStorageImpl * Each service call goes through the {@link HttpPipeline pipeline} in the {@code AzureQueueStorageImpl client}. * * @param client Client that interacts with the service interfaces * @param queueName Name of the queue */ QueueAsyncClient(AzureQueueStorageImpl client, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(client.httpPipeline()) .url(client.url()) .version(client.version()) .build(); } /** * Creates a QueueAsyncClient that sends requests to the storage queue service at {@code endpoint}. * Each service call goes through the {@code httpPipeline}. * * @param endpoint URL for the Storage Queue service * @param httpPipeline HttpPipeline that the HTTP requests and response flow through * @param queueName Name of the queue */ QueueAsyncClient(URL endpoint, HttpPipeline httpPipeline, String queueName) { this.queueName = queueName; this.client = new AzureQueueStorageBuilder().pipeline(httpPipeline) .url(endpoint.toString()) .build(); } /** * @return the URL of the storage queue * @throws RuntimeException If the queue is using a malformed URL. */ /** * Creates a new queue. * * <p><strong>Code Samples</strong></p> * * <p>Create a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create} * * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name already exists in the queue service. */ public Mono<VoidResponse> create() { return create(null); } /** * Creates a new queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Create a queue with metadata "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.create * * @param metadata Metadata to associate with the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If a queue with the same name and different metadata already exists in the queue service. */ public Mono<VoidResponse> create(Map<String, String> metadata) { return client.queues().createWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Permanently deletes the queue. * * <p><strong>Code Samples</strong></p> * * <p>Delete a queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.delete} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> delete() { return client.queues().deleteWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Retrieves metadata and approximate message count of the queue. * * <p><strong>Code Samples</strong></p> * * <p>Get the properties of the queue</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getProperties} * * @return A response containing a {@link QueueProperties} value which contains the metadata and approximate * messages count of the queue. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<QueueProperties>> getProperties() { return client.queues().getPropertiesWithRestResponseAsync(queueName, Context.NONE) .map(this::getQueuePropertiesResponse); } /** * Sets the metadata of the queue. * * Passing in a {@code null} value for metadata will clear the metadata associated with the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Set the queue's metadata to "queue:metadataMap"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setMetadata * * <p>Clear the queue's metadata</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMetadata * * @param metadata Metadata to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> setMetadata(Map<String, String> metadata) { return client.queues().setMetadataWithRestResponseAsync(queueName, null, metadata, null, Context.NONE) .map(VoidResponse::new); } /** * Retrieves stored access policies specified on the queue. * * <p><strong>Code Samples</strong></p> * * <p>List the stored access policies</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.getAccessPolicy} * * @return The stored access policies specified on the queue. * @throws StorageErrorException If the queue doesn't exist */ public Flux<SignedIdentifier> getAccessPolicy() { return client.queues().getAccessPolicyWithRestResponseAsync(queueName, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Sets stored access policies on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Set a read only stored access policy</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.setAccessPolicy} * * @param permissions Access policies to set on the queue * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist, a stored access policy doesn't have all fields filled out, * or the queue will have more than five policies. */ public Mono<VoidResponse> setAccessPolicy(List<SignedIdentifier> permissions) { return client.queues().setAccessPolicyWithRestResponseAsync(queueName, permissions, null, null, Context.NONE) .map(VoidResponse::new); } /** * Deletes all messages in the queue. 
* * <p><strong>Code Samples</strong></p> * * <p>Clear the messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.clearMessages} * * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue doesn't exist */ public Mono<VoidResponse> clearMessages() { return client.messages().clearWithRestResponseAsync(queueName, Context.NONE) .map(VoidResponse::new); } /** * Enqueues a message that has a time-to-live of 7 days and is instantly visible. * * <p><strong>Code Samples</strong></p> * * <p>Enqueue a message of "Hello, Azure"</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * @param messageText Message text * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText) { return enqueueMessage(messageText, Duration.ofSeconds(0), Duration.ofDays(7)); } /** * Enqueues a message with a given time-to-live and a timeout period where the message is invisible in the queue. * * <p><strong>Code Samples</strong></p> * * <p>Add a message of "Hello, Azure" that has a timeout of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessage * * <p>Add a message of "Goodbye, Azure" that has a time to live of 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.enqueueMessageLiveTime * * @param messageText Message text * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If unset the value will default to 0 and the message will be instantly visible. The timeout must be between 0 * seconds and 7 days. * @param timeToLive Optional. How long the message will stay alive in the queue in seconds. 
If unset the value will * default to 7 days, if -1 is passed the message will not expire. The time to live must be -1 or any positive number. * @return A {@link EnqueuedMessage} value that contains the {@link EnqueuedMessage * {@link EnqueuedMessage * about the enqueued message. * @throws StorageErrorException If the queue doesn't exist or the {@code visibilityTimeout} or {@code timeToLive} * are outside of the allowed limits. */ public Mono<Response<EnqueuedMessage>> enqueueMessage(String messageText, Duration visibilityTimeout, Duration timeToLive) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); Integer timeToLiveInSeconds = (timeToLive == null) ? null : (int) timeToLive.getSeconds(); QueueMessage message = new QueueMessage().messageText(messageText); return client.messages().enqueueWithRestResponseAsync(queueName, message, visibilityTimeoutInSeconds, timeToLiveInSeconds, null, null, Context.NONE) .map(response -> new SimpleResponse<>(response, response.value().get(0))); } /** * Retrieves the first message in the queue and hides it from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue a message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages} * * @return The first {@link DequeuedMessage} in the queue, it contains * {@link DequeuedMessage * with the message, additionally it contains other metadata about the message. * @throws StorageErrorException If the queue doesn't exist */ public Flux<DequeuedMessage> dequeueMessages() { return dequeueMessages(1, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for 30 seconds. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. 
Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DequeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. * @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages) { return dequeueMessages(maxMessages, Duration.ofSeconds(30)); } /** * Retrieves up to the maximum number of messages from the queue and hides them from other operations for the * timeout period. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue up to 5 messages and give them a 60 second timeout period</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.dequeueMessages * * @param maxMessages Optional. Maximum number of messages to get, if there are less messages exist in the queue than requested * all the messages will be returned. If left empty only 1 message will be retrieved, the allowed range is 1 to 32 * messages. * @param visibilityTimeout Optional. The timeout period for how long the message is invisible in the queue in seconds. * If left empty the dequeued messages will be invisible for 30 seconds. The timeout must be between 1 second and 7 days. * @return Up to {@code maxMessages} {@link DequeuedMessage DequeuedMessages} from the queue. Each DeqeuedMessage contains * {@link DequeuedMessage * with the message and other metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} or {@code visibilityTimeout} is * outside of the allowed bounds */ public Flux<DequeuedMessage> dequeueMessages(Integer maxMessages, Duration visibilityTimeout) { Integer visibilityTimeoutInSeconds = (visibilityTimeout == null) ? null : (int) visibilityTimeout.getSeconds(); return client.messages().dequeueWithRestResponseAsync(queueName, maxMessages, visibilityTimeoutInSeconds, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Peeks the first message in the queue. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages} * * @return A {@link PeekedMessage} that contains metadata about the message. */ public Flux<PeekedMessage> peekMessages() { return peekMessages(null); } /** * Peek messages from the front of the queue up to the maximum number of messages. * * Peeked messages don't contain the necessary information needed to interact with the message nor will it hide * messages from other operations on the queue. * * <p><strong>Code Samples</strong></p> * * <p>Peek up to the first five messages</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.peekMessages * * @param maxMessages Optional. Maximum number of messages to peek, if there are less messages exist in the queue than requested * all the messages will be peeked. If left empty only 1 message will be peeked, the allowed range is 1 to 32 * messages. * @return Up to {@code maxMessages} {@link PeekedMessage PeekedMessages} from the queue. Each PeekedMessage contains * metadata about the message. 
* @throws StorageErrorException If the queue doesn't exist or {@code maxMessages} is outside of the allowed bounds */ public Flux<PeekedMessage> peekMessages(Integer maxMessages) { return client.messages().peekWithRestResponseAsync(queueName, maxMessages, null, null, Context.NONE) .flatMapMany(response -> Flux.fromIterable(response.value())); } /** * Updates the specific message in the queue with a new message and resets the visibility timeout. * * <p><strong>Code Samples</strong></p> * * <p>Dequeue the first message and update it to "Hello again, Azure" and hide it for 5 seconds</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.updateMessage} * * @param messageText Updated value for the message * @param messageId Id of the message to update * @param popReceipt Unique identifier that must match for the message to be updated * @param visibilityTimeout The timeout period for how long the message is invisible in the queue in seconds. The * timeout period must be between 1 second and 7 days. * @return A {@link UpdatedMessage} that contains the new {@link UpdatedMessage * with the message, additionally contains the updated metadata about the message. 
* @throws StorageErrorException If the queue or messageId don't exist, the popReceipt doesn't match on the message, * or the {@code visibilityTimeout} is outside the allowed bounds */ public Mono<Response<UpdatedMessage>> updateMessage(String messageText, String messageId, String popReceipt, Duration visibilityTimeout) { QueueMessage message = new QueueMessage().messageText(messageText); return client.messageIds().updateWithRestResponseAsync(queueName, messageId, message, popReceipt, (int) visibilityTimeout.getSeconds(), Context.NONE) .map(this::getUpdatedMessageResponse); } /** * Deletes the specified message in the queue * * <p><strong>Code Samples</strong></p> * * <p>Delete the first message</p> * * {@codesnippet com.azure.storage.queue.queueAsyncClient.deleteMessage} * * @param messageId Id of the message to deleted * @param popReceipt Unique identifier that must match for the message to be deleted * @return A response that only contains headers and response status code * @throws StorageErrorException If the queue or messageId don't exist or the popReceipt doesn't match on the message */ public Mono<VoidResponse> deleteMessage(String messageId, String popReceipt) { return client.messageIds().deleteWithRestResponseAsync(queueName, messageId, popReceipt, Context.NONE) .map(VoidResponse::new); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private Response<QueueProperties> getQueuePropertiesResponse(QueuesGetPropertiesResponse response) { QueueGetPropertiesHeaders propertiesHeaders = response.deserializedHeaders(); QueueProperties properties = new QueueProperties(propertiesHeaders.metadata(), propertiesHeaders.approximateMessagesCount()); return new SimpleResponse<>(response, properties); } /* * Maps the HTTP headers returned from the service to the expected response type * @param response Service response * @return Mapped response */ private 
Response<UpdatedMessage> getUpdatedMessageResponse(MessageIdsUpdateResponse response) { MessageIdUpdateHeaders headers = response.deserializedHeaders(); UpdatedMessage updatedMessage = new UpdatedMessage(headers.popReceipt(), headers.timeNextVisible()); return new SimpleResponse<>(response, updatedMessage); } }
There's a CheckUtil method you used in the same file that gives you the modifier. `final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken(modifiersToken);`
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(".") + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); break; case TokenTypes.CLASS_DEF: final DetailAST modifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); isPublicClass = modifiersToken.branchContains(TokenTypes.LITERAL_PUBLIC); break; case TokenTypes.METHOD_DEF: if (!isPublicClass) { return; } checkNoExternalDependencyExposed(token); break; default: break; } }
final DetailAST modifiersToken = token.findFirstToken(TokenTypes.MODIFIERS);
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.IMPORT: final String importClassPath = FullIdent.createFullIdentBelow(token).getText(); final String className = importClassPath.substring(importClassPath.lastIndexOf(".") + 1); simpleClassNameToQualifiedNameMap.put(className, importClassPath); break; case TokenTypes.CLASS_DEF: final AccessModifier accessModifier = CheckUtil.getAccessModifierFromModifiersToken( token.findFirstToken(TokenTypes.MODIFIERS)); isPublicClass = accessModifier.equals(AccessModifier.PUBLIC); break; case TokenTypes.METHOD_DEF: if (!isPublicClass) { return; } checkNoExternalDependencyExposed(token); break; default: break; } }
class from external dependency. You should not use it as a return or method argument type."; private static final Set<String> VALID_DEPENDENCY_SET = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "java", "com.azure", "reactor", "io.netty.buffer.ByteBuf" ))); private final Map<String, String> simpleClassNameToQualifiedNameMap = new HashMap<>(); private boolean isPublicClass; @Override public void beginTree(DetailAST rootAST) { simpleClassNameToQualifiedNameMap.clear(); }
class from external dependency. You should not use it as a return or method argument type."; private static final Set<String> VALID_DEPENDENCY_SET = Collections.unmodifiableSet(new HashSet<>(Arrays.asList( "java", "com.azure", "reactor", "io.netty.buffer.ByteBuf" ))); private final Map<String, String> simpleClassNameToQualifiedNameMap = new HashMap<>(); private boolean isPublicClass; @Override public void beginTree(DetailAST rootAST) { simpleClassNameToQualifiedNameMap.clear(); }
nit: is this still relevant? It's feeling more and more like we're not going to take on auto-splitting for GA.
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> send(Flux.fromIterable(list))); }); }
private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final String partitionKey = options.partitionKey(); verifyPartitionKey(partitionKey); return sendLinkMono.flatMap(link -> { return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .partitionKey(partitionKey) .maximumSizeInBytes(batchSize); return events.collect(new EventDataCollector(batchOptions, 1, () -> link.getErrorContext())); }) .flatMap(list -> send(Flux.fromIterable(list))); }); }
class EventHubProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; /** * Creates a new instance of this {@link EventHubProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options); final BatchOptions clone = (BatchOptions) options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. 
* * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Publisher<EventData> events) { Objects.requireNonNull(events); return sendInternal(Flux.from(events), DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Publisher<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(Flux.from(events), options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducer * @see EventHubProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> send(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId())); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH)); } } /** * Disposes of the {@link EventHubProducer} by closing the underlying connection to the service. 
* * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.timeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducer implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; /** * The default maximum allowable size, in bytes, for a batch to be sent. */ public static final int MAX_MESSAGE_LENGTH_BYTES = 256 * 1024; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); private final ClientLogger logger = new ClientLogger(EventHubProducer.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final EventHubProducerOptions senderOptions; private final Mono<AmqpSendLink> sendLinkMono; private final boolean isPartitionSender; /** * Creates a new instance of this {@link EventHubProducer} that sends messages to {@link * EventHubProducerOptions * otherwise, allows the service to load balance the messages amongst available partitions. */ EventHubProducer(Mono<AmqpSendLink> amqpSendLinkMono, EventHubProducerOptions options) { this.sendLinkMono = amqpSendLinkMono.cache(); this.senderOptions = options; this.isPartitionSender = !ImplUtils.isNullOrEmpty(options.partitionId()); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch(BatchOptions options) { Objects.requireNonNull(options); final BatchOptions clone = (BatchOptions) options.clone(); verifyPartitionKey(clone.partitionKey()); return sendLinkMono.flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; if (clone.maximumSizeInBytes() > maximumLinkSize) { return Mono.error(new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.maximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.maximumSizeInBytes() > 0 ? clone.maximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.partitionKey(), () -> link.getErrorContext())); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * * @param event Event to send to the service. * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event) { Objects.requireNonNull(event); return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * @return A {@link Mono} that completes when the event is pushed to the service. */ public Mono<Void> send(EventData event, SendOptions options) { Objects.requireNonNull(event); Objects.requireNonNull(options); return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. 
* * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events) { Objects.requireNonNull(events); return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Iterable<EventData> events, SendOptions options) { Objects.requireNonNull(events); return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @return A {@link Mono} that completes when all events are pushed to the service. */ public Mono<Void> send(Publisher<EventData> events) { Objects.requireNonNull(events); return sendInternal(Flux.from(events), DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ public Mono<Void> send(Publisher<EventData> events, SendOptions options) { Objects.requireNonNull(events); Objects.requireNonNull(options); return sendInternal(Flux.from(events), options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducer * @see EventHubProducer */ public Mono<Void> send(EventDataBatch batch) { Objects.requireNonNull(batch); if (batch.getEvents().isEmpty()) { logger.info("Cannot send an EventBatch that is empty."); return Mono.empty(); } logger.info("Sending batch with partitionKey[{}], size[{}].", batch.getPartitionKey(), batch.getSize()); final List<Message> messages = EventDataUtil.toAmqpMessage(batch.getPartitionKey(), batch.getEvents()); return sendLinkMono.flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> send(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private void verifyPartitionKey(String partitionKey) { if (ImplUtils.isNullOrEmpty(partitionKey)) { return; } if (isPartitionSender) { throw new IllegalArgumentException(String.format(Locale.US, "BatchOptions.partitionKey() cannot be set when an EventHubProducer is created with" + "EventHubProducerOptions.partitionId() set. This EventHubProducer can only send events to partition '%s'.", senderOptions.partitionId())); } else if (partitionKey.length() > MAX_PARTITION_KEY_LENGTH) { throw new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", partitionKey, MAX_PARTITION_KEY_LENGTH)); } } /** * Disposes of the {@link EventHubProducer} by closing the underlying connection to the service. 
* * @throws IOException if the underlying transport could not be closed and its resources could not be * disposed. */ @Override public void close() throws IOException { if (!isDisposed.getAndSet(true)) { final AmqpSendLink block = sendLinkMono.block(senderOptions.timeout()); if (block != null) { block.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.maximumSizeInBytes() > 0 ? options.maximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.partitionKey(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(this.maxMessageSize, options.partitionKey(), contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Just a formatting thing: I think it would be more readable to have both lambda statements on new lines.
public PagedFlux<KeyBase> listKeys() { return new PagedFlux<>(() -> listKeysFirstPage(), continuationToken -> listKeysNextPage(continuationToken)); }
continuationToken -> listKeysNextPage(continuationToken));
public PagedFlux<KeyBase> listKeys() { return new PagedFlux<>(() -> listKeysFirstPage(), continuationToken -> listKeysNextPage(continuationToken)); }
class KeyAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String endpoint; private final KeyService service; private final ClientLogger logger = new ClientLogger(KeyAsyncClient.class); /** * Creates a KeyAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ KeyAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(KeyService.class, pipeline); } /** * Creates a new key and stores it in the key vault. The create key operation can be used to create any key type in * key vault. If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link KeyType keyType} indicates the type of key to create. Possible values include: {@link KeyType * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new EC key. Subscribes to the call asynchronously and prints out the newly created key details when a response has been received.</p> * <pre> * keyAsyncClient.createKey("keyName", KeyType.EC).subscribe(keyResponse -&gt; * System.out.printf("Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param name The name of the key being created. * @param keyType The type of key to create. For valid values, see {@link KeyType KeyType}. * @throws ResourceModifiedException if {@code name} or {@code keyType} is null. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createKey(String name, KeyType keyType) { KeyRequestParameters parameters = new KeyRequestParameters().kty(keyType); return service.createKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating key - {}", name)) .doOnSuccess(response -> logger.info("Created key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create key - {}", name, error)); } /** * Creates a new key and stores it in the key vault. The create key operation can be used to create any key type in * key vault. If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link KeyCreateOptions} is required. The {@link KeyCreateOptions * are optional. The {@link KeyCreateOptions * * <p>The {@link KeyCreateOptions * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new Rsa key which activates in one day and expires in one year. Subscribes to the call asynchronously * and prints out the newly created key details when a response has been received.</p> * <pre> * KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) * .notBefore(OffsetDateTime.now().plusDays(1)) * .expires(OffsetDateTime.now().plusYears(1)); * * keyAsyncClient.createKey(keyCreateOptions).subscribe(keyResponse -&gt; * System.out.printf("Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param keyCreateOptions The key configuration object containing information about the key being created. * @throws NullPointerException if {@code keyCreateOptions} is {@code null}. * @throws ResourceModifiedException if {@code keyCreateOptions} is malformed. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createKey(KeyCreateOptions keyCreateOptions) { Objects.requireNonNull(keyCreateOptions, "The key options parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(keyCreateOptions.keyType()) .keyOps(keyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(keyCreateOptions)); return service.createKey(endpoint, keyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating key - {}", keyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create key - {}", keyCreateOptions.name(), error)); } /** * Creates a new Rsa key and stores it in the key vault. The create Rsa key operation can be used to create any Rsa key type in * key vault. If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link RsaKeyCreateOptions} is required. The {@link RsaKeyCreateOptions * and {@link RsaKeyCreateOptions * is set to true by Azure Key Vault, if not specified.</p> * * <p>The {@link RsaKeyCreateOptions * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new RSA key with size 2048 which activates in one day and expires in one year. 
Subscribes to the call asynchronously * and prints out the newly created key details when a response has been received.</p> * <pre> * RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName", KeyType.RSA) * .keySize(2048) * .notBefore(OffsetDateTime.now().plusDays(1)) * .expires(OffsetDateTime.now().plusYears(1)); * * keyAsyncClient.createRsaKey(rsaKeyCreateOptions).subscribe(keyResponse -&gt; * System.out.printf("RSA Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param rsaKeyCreateOptions The key configuration object containing information about the rsa key being created. * @throws NullPointerException if {@code rsaKeyCreateOptions} is {@code null}. * @throws ResourceModifiedException if {@code rsaKeyCreateOptions} is malformed. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createRsaKey(RsaKeyCreateOptions rsaKeyCreateOptions) { Objects.requireNonNull(rsaKeyCreateOptions, "The Rsa key options parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(rsaKeyCreateOptions.keyType()) .keySize(rsaKeyCreateOptions.keySize()) .keyOps(rsaKeyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(rsaKeyCreateOptions)); return service.createKey(endpoint, rsaKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating Rsa key - {}", rsaKeyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created Rsa key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create Rsa key - {}", rsaKeyCreateOptions.name(), error)); } /** * Creates a new Ec key and stores it in the key vault. The create Ec key operation can be used to create any Ec key type in * key vault. 
If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link EcKeyCreateOptions} parameter is required. The {@link EcKeyCreateOptions * default value of {@link KeyCurveName * are optional. The {@link EcKeyCreateOptions * * <p>The {@link EcKeyCreateOptions * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new EC key with P-384 web key curve. The key activates in one day and expires in one year. Subscribes to the call asynchronously * and prints out the newly created ec key details when a response has been received.</p> * <pre> * EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName", KeyType.EC) * .curve(KeyCurveName.P_384) * .notBefore(OffsetDateTime.now().plusDays(1)) * .expires(OffsetDateTime.now().plusYears(1)); * * keyAsyncClient.createEcKey(ecKeyCreateOptions).subscribe(keyResponse -&gt; * System.out.printf("EC Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param ecKeyCreateOptions The key options object containing information about the ec key being created. * @throws NullPointerException if {@code ecKeyCreateOptions} is {@code null}. * @throws ResourceModifiedException if {@code ecKeyCreateOptions} is malformed. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createEcKey(EcKeyCreateOptions ecKeyCreateOptions) { Objects.requireNonNull(ecKeyCreateOptions, "The Ec key options options cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(ecKeyCreateOptions.keyType()) .curve(ecKeyCreateOptions.curve()) .keyOps(ecKeyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(ecKeyCreateOptions)); return service.createKey(endpoint, ecKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating Ec key - {}", ecKeyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created Ec key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create Ec key - {}", ecKeyCreateOptions.name(), error)); } /** * Imports an externally created key and stores it in key vault. The import key operation may be used to import any key type * into the Azure Key Vault. If the named key already exists, Azure Key Vault creates a new version of the key. This operation requires the {@code keys/import} permission. * * <p><strong>Code Samples</strong></p> * <p>Imports a new key into key vault. Subscribes to the call asynchronously and prints out the newly imported key details * when a response has been received.</p> * <pre> * keyAsyncClient.importKey("keyName", jsonWebKeyToImport).subscribe(keyResponse -&gt; * System.out.printf("Key is imported with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param name The name for the imported key. * @param keyMaterial The Json web key being imported. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> importKey(String name, JsonWebKey keyMaterial) { KeyImportRequestParameters parameters = new KeyImportRequestParameters().key(keyMaterial); return service.importKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Importing key - {}", name)) .doOnSuccess(response -> logger.info("Imported key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to import key - {}", name, error)); } /** * Imports an externally created key and stores it in key vault. The import key operation may be used to import any key type * into the Azure Key Vault. If the named key already exists, Azure Key Vault creates a new version of the key. This operation requires the {@code keys/import} permission. * * <p>The {@code keyImportOptions} is required and its fields {@link KeyImportOptions * be null. The {@link KeyImportOptions * are optional. If not specified, no values are set for the fields. The {@link KeyImportOptions * the {@link KeyImportOptions * * <p><strong>Code Samples</strong></p> * <p>Imports a new key into key vault. Subscribes to the call asynchronously and prints out the newly imported key details * when a response has been received.</p> * <pre> * KeyImportOptions keyImportOptions = new KeyImportOptions("keyName", jsonWebKeyToImport) * .hsm(true) * .expires(OffsetDateTime.now().plusDays(60)); * * keyAsyncClient.importKey(keyImportOptions).subscribe(keyResponse -&gt; * System.out.printf("Key is imported with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param keyImportOptions The key import configuration object containing information about the json web key being imported. * @throws NullPointerException if {@code keyImportOptions} is {@code null}. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> importKey(KeyImportOptions keyImportOptions) { Objects.requireNonNull(keyImportOptions, "The key import configuration parameter cannot be null."); KeyImportRequestParameters parameters = new KeyImportRequestParameters() .key(keyImportOptions.keyMaterial()) .hsm(keyImportOptions.hsm()) .keyAttributes(new KeyRequestAttributes(keyImportOptions)); return service.importKey(endpoint, keyImportOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Importing key - {}", keyImportOptions.name())) .doOnSuccess(response -> logger.info("Imported key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to import key - {}", keyImportOptions.name(), error)); } /** * Gets the public part of the specified key and key version. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Subscribes to the call asynchronously and prints out the * returned key details when a response has been received.</p> * <pre> * String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; * keyAsyncClient.getKey("keyName", keyVersion).subscribe(keyResponse -&gt; * System.out.printf("Key returned with name %s, id %s and version %s", keyResponse.value().name(), * keyResponse.value().id(), keyResponse.value().version())); * </pre> * * @param name The name of the key, cannot be null * @param version The version of the key to retrieve. If this is an empty String or null, this call is equivalent to calling {@link KeyAsyncClient * @throws ResourceNotFoundException when a key with {@code name} and {@code version} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} or {@code version} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> getKey(String name, String version) { String keyVersion = ""; if (version != null) { keyVersion = version; } return service.getKey(endpoint, name, keyVersion, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Retrieving key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); } /** * Get the public part of the latest version of the specified key from the key vault. The get key operation is applicable to * all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the key in the key vault. Subscribes to the call asynchronously and prints out the * returned key details when a response has been received.</p> * <pre> * keyAsyncClient.getKey("keyName").subscribe(keyResponse -&gt; * System.out.printf("Key with name %s, id %s \n", keyResponse.value().name(), * keyResponse.value().id())); * </pre> * * @param name The name of the key. * @throws ResourceNotFoundException when a key with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> getKey(String name) { return getKey(name, "") .doOnRequest(ignored -> logger.info("Retrieving key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); } /** * Get public part of the key which represents {@link KeyBase keyBase} from the key vault. The get key operation is applicable * to all key types and it requires the {@code keys/get} permission. 
* * <p>The list operations {@link KeyAsyncClient * the {@link Flux} containing {@link KeyBase base key} as output excluding the key material of the key. * This operation can then be used to get the full key with its key material from {@code keyBase}.</p> * <pre> * keyAsyncClient.listKeys().subscribe(keyBase -&gt; * client.getKey(keyBase).subscribe(keyResponse -&gt; * System.out.printf("Key with name %s and value %s \n", keyResponse.value().name(), keyResponse.value().id()))); * </pre> * * @param keyBase The {@link KeyBase base key} holding attributes of the key being requested. * @throws ResourceNotFoundException when a key with {@link KeyBase * @throws HttpRequestException if {@link KeyBase
class KeyAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String endpoint; private final KeyService service; private final ClientLogger logger = new ClientLogger(KeyAsyncClient.class); /** * Creates a KeyAsyncClient that uses {@code pipeline} to service requests * * @param endpoint URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. */ KeyAsyncClient(URL endpoint, HttpPipeline pipeline) { Objects.requireNonNull(endpoint, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.endpoint = endpoint.toString(); this.service = RestProxy.create(KeyService.class, pipeline); } /** * Creates a new key and stores it in the key vault. The create key operation can be used to create any key type in * key vault. If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link KeyType keyType} indicates the type of key to create. Possible values include: {@link KeyType * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new EC key. Subscribes to the call asynchronously and prints out the newly created key details when a response has been received.</p> * <pre> * keyAsyncClient.createKey("keyName", KeyType.EC).subscribe(keyResponse -&gt; * System.out.printf("Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param name The name of the key being created. * @param keyType The type of key to create. For valid values, see {@link KeyType KeyType}. * @throws ResourceModifiedException if {@code name} or {@code keyType} is null. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createKey(String name, KeyType keyType) { KeyRequestParameters parameters = new KeyRequestParameters().kty(keyType); return service.createKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating key - {}", name)) .doOnSuccess(response -> logger.info("Created key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create key - {}", name, error)); } /** * Creates a new key and stores it in the key vault. The create key operation can be used to create any key type in * key vault. If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link KeyCreateOptions} is required. The {@link KeyCreateOptions * are optional. The {@link KeyCreateOptions * * <p>The {@link KeyCreateOptions * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new Rsa key which activates in one day and expires in one year. Subscribes to the call asynchronously * and prints out the newly created key details when a response has been received.</p> * <pre> * KeyCreateOptions keyCreateOptions = new KeyCreateOptions("keyName", KeyType.RSA) * .notBefore(OffsetDateTime.now().plusDays(1)) * .expires(OffsetDateTime.now().plusYears(1)); * * keyAsyncClient.createKey(keyCreateOptions).subscribe(keyResponse -&gt; * System.out.printf("Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param keyCreateOptions The key configuration object containing information about the key being created. * @throws NullPointerException if {@code keyCreateOptions} is {@code null}. * @throws ResourceModifiedException if {@code keyCreateOptions} is malformed. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createKey(KeyCreateOptions keyCreateOptions) { Objects.requireNonNull(keyCreateOptions, "The key options parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(keyCreateOptions.keyType()) .keyOps(keyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(keyCreateOptions)); return service.createKey(endpoint, keyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating key - {}", keyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create key - {}", keyCreateOptions.name(), error)); } /** * Creates a new Rsa key and stores it in the key vault. The create Rsa key operation can be used to create any Rsa key type in * key vault. If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link RsaKeyCreateOptions} is required. The {@link RsaKeyCreateOptions * and {@link RsaKeyCreateOptions * is set to true by Azure Key Vault, if not specified.</p> * * <p>The {@link RsaKeyCreateOptions * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new RSA key with size 2048 which activates in one day and expires in one year. 
Subscribes to the call asynchronously * and prints out the newly created key details when a response has been received.</p> * <pre> * RsaKeyCreateOptions rsaKeyCreateOptions = new RsaKeyCreateOptions("keyName", KeyType.RSA) * .keySize(2048) * .notBefore(OffsetDateTime.now().plusDays(1)) * .expires(OffsetDateTime.now().plusYears(1)); * * keyAsyncClient.createRsaKey(rsaKeyCreateOptions).subscribe(keyResponse -&gt; * System.out.printf("RSA Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param rsaKeyCreateOptions The key configuration object containing information about the rsa key being created. * @throws NullPointerException if {@code rsaKeyCreateOptions} is {@code null}. * @throws ResourceModifiedException if {@code rsaKeyCreateOptions} is malformed. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createRsaKey(RsaKeyCreateOptions rsaKeyCreateOptions) { Objects.requireNonNull(rsaKeyCreateOptions, "The Rsa key options parameter cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(rsaKeyCreateOptions.keyType()) .keySize(rsaKeyCreateOptions.keySize()) .keyOps(rsaKeyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(rsaKeyCreateOptions)); return service.createKey(endpoint, rsaKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating Rsa key - {}", rsaKeyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created Rsa key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create Rsa key - {}", rsaKeyCreateOptions.name(), error)); } /** * Creates a new Ec key and stores it in the key vault. The create Ec key operation can be used to create any Ec key type in * key vault. 
If the named key already exists, Azure Key Vault creates a new version of the key. It requires the {@code keys/create} permission. * * <p>The {@link EcKeyCreateOptions} parameter is required. The {@link EcKeyCreateOptions * default value of {@link KeyCurveName * are optional. The {@link EcKeyCreateOptions * * <p>The {@link EcKeyCreateOptions * {@link KeyType * * <p><strong>Code Samples</strong></p> * <p>Creates a new EC key with P-384 web key curve. The key activates in one day and expires in one year. Subscribes to the call asynchronously * and prints out the newly created ec key details when a response has been received.</p> * <pre> * EcKeyCreateOptions ecKeyCreateOptions = new EcKeyCreateOptions("keyName", KeyType.EC) * .curve(KeyCurveName.P_384) * .notBefore(OffsetDateTime.now().plusDays(1)) * .expires(OffsetDateTime.now().plusYears(1)); * * keyAsyncClient.createEcKey(ecKeyCreateOptions).subscribe(keyResponse -&gt; * System.out.printf("EC Key is created with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param ecKeyCreateOptions The key options object containing information about the ec key being created. * @throws NullPointerException if {@code ecKeyCreateOptions} is {@code null}. * @throws ResourceModifiedException if {@code ecKeyCreateOptions} is malformed. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> createEcKey(EcKeyCreateOptions ecKeyCreateOptions) { Objects.requireNonNull(ecKeyCreateOptions, "The Ec key options options cannot be null."); KeyRequestParameters parameters = new KeyRequestParameters() .kty(ecKeyCreateOptions.keyType()) .curve(ecKeyCreateOptions.curve()) .keyOps(ecKeyCreateOptions.keyOperations()) .keyAttributes(new KeyRequestAttributes(ecKeyCreateOptions)); return service.createKey(endpoint, ecKeyCreateOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Creating Ec key - {}", ecKeyCreateOptions.name())) .doOnSuccess(response -> logger.info("Created Ec key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to create Ec key - {}", ecKeyCreateOptions.name(), error)); } /** * Imports an externally created key and stores it in key vault. The import key operation may be used to import any key type * into the Azure Key Vault. If the named key already exists, Azure Key Vault creates a new version of the key. This operation requires the {@code keys/import} permission. * * <p><strong>Code Samples</strong></p> * <p>Imports a new key into key vault. Subscribes to the call asynchronously and prints out the newly imported key details * when a response has been received.</p> * <pre> * keyAsyncClient.importKey("keyName", jsonWebKeyToImport).subscribe(keyResponse -&gt; * System.out.printf("Key is imported with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param name The name for the imported key. * @param keyMaterial The Json web key being imported. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> importKey(String name, JsonWebKey keyMaterial) { KeyImportRequestParameters parameters = new KeyImportRequestParameters().key(keyMaterial); return service.importKey(endpoint, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Importing key - {}", name)) .doOnSuccess(response -> logger.info("Imported key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to import key - {}", name, error)); } /** * Imports an externally created key and stores it in key vault. The import key operation may be used to import any key type * into the Azure Key Vault. If the named key already exists, Azure Key Vault creates a new version of the key. This operation requires the {@code keys/import} permission. * * <p>The {@code keyImportOptions} is required and its fields {@link KeyImportOptions * be null. The {@link KeyImportOptions * are optional. If not specified, no values are set for the fields. The {@link KeyImportOptions * the {@link KeyImportOptions * * <p><strong>Code Samples</strong></p> * <p>Imports a new key into key vault. Subscribes to the call asynchronously and prints out the newly imported key details * when a response has been received.</p> * <pre> * KeyImportOptions keyImportOptions = new KeyImportOptions("keyName", jsonWebKeyToImport) * .hsm(true) * .expires(OffsetDateTime.now().plusDays(60)); * * keyAsyncClient.importKey(keyImportOptions).subscribe(keyResponse -&gt; * System.out.printf("Key is imported with name %s and id %s \n", keyResponse.value().name(), keyResponse.value().id())); * </pre> * * @param keyImportOptions The key import configuration object containing information about the json web key being imported. * @throws NullPointerException if {@code keyImportOptions} is {@code null}. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> importKey(KeyImportOptions keyImportOptions) { Objects.requireNonNull(keyImportOptions, "The key import configuration parameter cannot be null."); KeyImportRequestParameters parameters = new KeyImportRequestParameters() .key(keyImportOptions.keyMaterial()) .hsm(keyImportOptions.hsm()) .keyAttributes(new KeyRequestAttributes(keyImportOptions)); return service.importKey(endpoint, keyImportOptions.name(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Importing key - {}", keyImportOptions.name())) .doOnSuccess(response -> logger.info("Imported key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to import key - {}", keyImportOptions.name(), error)); } /** * Gets the public part of the specified key and key version. The get key operation is applicable to all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the key in the key vault. Subscribes to the call asynchronously and prints out the * returned key details when a response has been received.</p> * <pre> * String keyVersion = "6A385B124DEF4096AF1361A85B16C204"; * keyAsyncClient.getKey("keyName", keyVersion).subscribe(keyResponse -&gt; * System.out.printf("Key returned with name %s, id %s and version %s", keyResponse.value().name(), * keyResponse.value().id(), keyResponse.value().version())); * </pre> * * @param name The name of the key, cannot be null * @param version The version of the key to retrieve. If this is an empty String or null, this call is equivalent to calling {@link KeyAsyncClient * @throws ResourceNotFoundException when a key with {@code name} and {@code version} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} or {@code version} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> getKey(String name, String version) { String keyVersion = ""; if (version != null) { keyVersion = version; } return service.getKey(endpoint, name, keyVersion, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE) .doOnRequest(ignored -> logger.info("Retrieving key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); } /** * Get the public part of the latest version of the specified key from the key vault. The get key operation is applicable to * all key types and it requires the {@code keys/get} permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the key in the key vault. Subscribes to the call asynchronously and prints out the * returned key details when a response has been received.</p> * <pre> * keyAsyncClient.getKey("keyName").subscribe(keyResponse -&gt; * System.out.printf("Key with name %s, id %s \n", keyResponse.value().name(), * keyResponse.value().id())); * </pre> * * @param name The name of the key. * @throws ResourceNotFoundException when a key with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ public Mono<Response<Key>> getKey(String name) { return getKey(name, "") .doOnRequest(ignored -> logger.info("Retrieving key - {}", name)) .doOnSuccess(response -> logger.info("Retrieved key - {}", response.value().name())) .doOnError(error -> logger.warning("Failed to get key - {}", name, error)); } /** * Get public part of the key which represents {@link KeyBase keyBase} from the key vault. The get key operation is applicable * to all key types and it requires the {@code keys/get} permission. 
* * <p>The list operations {@link KeyAsyncClient * the {@link Flux} containing {@link KeyBase base key} as output excluding the key material of the key. * This operation can then be used to get the full key with its key material from {@code keyBase}.</p> * <pre> * keyAsyncClient.listKeys().subscribe(keyBase -&gt; * client.getKey(keyBase).subscribe(keyResponse -&gt; * System.out.printf("Key with name %s and value %s \n", keyResponse.value().name(), keyResponse.value().id()))); * </pre> * * @param keyBase The {@link KeyBase base key} holding attributes of the key being requested. * @throws ResourceNotFoundException when a key with {@link KeyBase * @throws HttpRequestException if {@link KeyBase