comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Let's make this log message a constant string as it doesn't have any instance data
public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) { if (context != null && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) { logger.info("The string to sign computed by the SDK is: {}{}", stringToSign, System.lineSeparator()); logger.warning("Please remember to disable '{}' before going to production as this " + "string can potentially contain PII.", Constants.STORAGE_LOG_STRING_TO_SIGN); } }
logger.warning("Please remember to disable '{}' before going to production as this "
public static void logStringToSign(ClientLogger logger, String stringToSign, Context context) { if (context != null && Boolean.TRUE.equals(context.getData(Constants.STORAGE_LOG_STRING_TO_SIGN).orElse(false))) { logger.info(STRING_TO_SIGN_LOG_INFO_MESSAGE, stringToSign, System.lineSeparator()); logger.warning(STRING_TO_SIGN_LOG_WARNING_MESSAGE, Constants.STORAGE_LOG_STRING_TO_SIGN); } }
class StorageImplUtils { private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class); private static final String ARGUMENT_NULL_OR_EMPTY = "The argument must not be null or an empty string. Argument name: %s."; private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s."; private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments."; /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a string (ex. key=val1,val2,val3 instead of key=[val1, val2, val3]). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. */ public static Map<String, String> parseQueryString(final String queryString) { return parseQueryStringHelper(queryString, Utility::urlDecode); } /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. 
*/ public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) { return parseQueryStringHelper(queryString, value -> { String[] v = value.split(","); String[] ret = new String[v.length]; for (int i = 0; i < v.length; i++) { ret[i] = urlDecode(v[i]); } return ret; }); } private static <T> Map<String, T> parseQueryStringHelper(final String queryString, Function<String, T> valueParser) { TreeMap<String, T> pieces = new TreeMap<>(); if (CoreUtils.isNullOrEmpty(queryString)) { return pieces; } for (String kvp : queryString.split("&")) { int equalIndex = kvp.indexOf("="); String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT)); T value = valueParser.apply(kvp.substring(equalIndex + 1)); pieces.putIfAbsent(key, value); } return pieces; } /** * Blocks an asynchronous response with an optional timeout. * * @param response Asynchronous response to block * @param timeout Optional timeout * @param <T> Return type of the asynchronous response * @return the value of the asynchronous response * @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires. */ public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) { if (timeout == null) { return response.block(); } else { return response.block(timeout); } } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Mono to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Mono. * @return Mono with an applied timeout, if any. */ public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Flux to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Flux. * @return Flux with an applied timeout, if any. 
*/ public static <T> Flux<T> applyOptionalTimeout(Flux<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Asserts that a value is not {@code null}. * * @param param Name of the parameter * @param value Value of the parameter * @throws NullPointerException If {@code value} is {@code null} */ public static void assertNotNull(final String param, final Object value) { if (value == null) { throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param)); } } /** * Asserts that the specified number is in the valid range. The range is inclusive. * * @param param Name of the parameter * @param value Value of the parameter * @param min The minimum allowed value * @param max The maximum allowed value * @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than * {@code max}. */ public static void assertInBounds(final String param, final long value, final long min, final long max) { if (value < min || value > max) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT, PARAMETER_NOT_IN_RANGE, param, min, max))); } } /** * Computes a signature for the specified string using the HMAC-SHA256 algorithm. * * @param base64Key Base64 encoded key used to sign the string * @param stringToSign UTF-8 encoded string to sign * @return the HMAC-SHA256 encoded signature * @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded * string, or the UTF-8 charset isn't supported. 
*/ public static String computeHMac256(final String base64Key, final String stringToSign) { try { byte[] key = Base64.getDecoder().decode(base64Key); Mac hmacSHA256 = Mac.getInstance("HmacSHA256"); hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256")); byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8); return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes)); } catch (NoSuchAlgorithmException | InvalidKeyException ex) { throw new RuntimeException(ex); } } /** * Appends a string to the end of the passed URL's path. * * @param baseURL URL having a path appended * @param name Name of the path * @return a URL with the path appended. * @throws IllegalArgumentException If {@code name} causes the URL to become malformed. */ public static URL appendToUrlPath(String baseURL, String name) { UrlBuilder builder = UrlBuilder.parse(baseURL); if (builder.getPath() == null) { builder.setPath("/"); } else if (!builder.getPath().endsWith("/")) { builder.setPath(builder.getPath() + "/"); } builder.setPath(builder.getPath() + name); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the last path segment from the passed URL. * * @param baseUrl URL having its last path segment stripped * @return a URL with the path segment stripped. * @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it * doesn't contain any path segments. 
*/ public static URL stripLastPathSegment(URL baseUrl) { UrlBuilder builder = UrlBuilder.parse(baseUrl); if (builder.getPath() == null || !builder.getPath().contains("/")) { throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl)); } builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/"))); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the account name from host part of the URL object. * * @param url URL having its hostanme * @return account name. */ public static String getAccountName(URL url) { UrlBuilder builder = UrlBuilder.parse(url); String accountName = null; String host = builder.getHost(); if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } return accountName; } /** Returns an empty string if value is {@code null}, otherwise returns value * @param value The value to check and return. * @return The value or empty string. */ public static String emptyIfNull(String value) { return value == null ? "" : value; } /** * Reads data from an input stream and writes it to an output stream. * @param source {@link InputStream source} * @param writeLength The length of data to write. * @param destination {@link OutputStream destination} * @throws IOException If an I/O error occurs. 
*/ public static void copyToOutputStream(InputStream source, long writeLength, OutputStream destination) throws IOException { StorageImplUtils.assertNotNull("source", source); StorageImplUtils.assertNotNull("destination", destination); final byte[] retrievedBuff = new byte[Constants.BUFFER_COPY_LENGTH]; int nextCopy = (int) Math.min(retrievedBuff.length, writeLength); int count = source.read(retrievedBuff, 0, nextCopy); while (nextCopy > 0 && count != -1) { destination.write(retrievedBuff, 0, count); nextCopy = (int) Math.min(retrievedBuff.length, writeLength); count = source.read(retrievedBuff, 0, nextCopy); } } /** * Logs the string to sign if a valid context is provided. * * @param logger {@link ClientLogger} * @param stringToSign The string to sign to log. * @param context Additional context to determine if the string to sign should be logged. */ /** * Converts the storage exception message. * * @param message The storage exception message * @param response The storage service response. * @return The converted storage exception message. */ public static String convertStorageExceptionMessage(String message, HttpResponse response) { if (response != null) { if (response.getStatusCode() == 403) { return String.format("If you are using a StorageSharedKeyCredential, and the server returned an " + "error message that says 'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value pair " + "'%s': true to the appropriate method call.%n" + "If you are using a SAS token, and the server returned an error message that says " + "'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. 
To log the string to sign, pass in the context key value " + "pair '%s': true to the appropriate generateSas method call.%n" + "Please remember to disable '%s' before going to production as this string can potentially " + "contain PII.%n", Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN) + message; } if (response.getRequest() != null && response.getRequest().getHttpMethod() != null && response.getRequest().getHttpMethod().equals(HttpMethod.HEAD) && response.getHeaders().getValue(ERROR_CODE) != null) { return message.replaceFirst("(empty body)", response.getHeaders().getValue(ERROR_CODE)); } } return message; } }
class StorageImplUtils { private static final ClientLogger LOGGER = new ClientLogger(StorageImplUtils.class); private static final String ARGUMENT_NULL_OR_EMPTY = "The argument must not be null or an empty string. Argument name: %s."; private static final String PARAMETER_NOT_IN_RANGE = "The value of the parameter '%s' should be between %s and %s."; private static final String NO_PATH_SEGMENTS = "URL %s does not contain path segments."; private static final String STRING_TO_SIGN_LOG_INFO_MESSAGE = "The string to sign computed by the SDK is: {}{}"; private static final String STRING_TO_SIGN_LOG_WARNING_MESSAGE = "Please remember to disable '{}' before going " + "to production as this string can potentially contain PII."; private static final String STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE = String.format( "If you are using a StorageSharedKeyCredential, and the server returned an " + "error message that says 'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value pair " + "'%s': true to the appropriate method call.%n" + "If you are using a SAS token, and the server returned an error message that says " + "'Signature did not match', you can compare the string to sign with" + " the one generated by the SDK. To log the string to sign, pass in the context key value " + "pair '%s': true to the appropriate generateSas method call.%n" + "Please remember to disable '%s' before going to production as this string can potentially " + "contain PII.%n", Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN, Constants.STORAGE_LOG_STRING_TO_SIGN); /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a string (ex. key=val1,val2,val3 instead of key=[val1, val2, val3]). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. 
*/ public static Map<String, String> parseQueryString(final String queryString) { return parseQueryStringHelper(queryString, Utility::urlDecode); } /** * Parses the query string into a key-value pair map that maintains key, query parameter key, order. The value is * stored as a parsed array (ex. key=[val1, val2, val3] instead of key=val1,val2,val3). * * @param queryString Query string to parse * @return a mapping of query string pieces as key-value pairs. */ public static Map<String, String[]> parseQueryStringSplitValues(final String queryString) { return parseQueryStringHelper(queryString, value -> { String[] v = value.split(","); String[] ret = new String[v.length]; for (int i = 0; i < v.length; i++) { ret[i] = urlDecode(v[i]); } return ret; }); } private static <T> Map<String, T> parseQueryStringHelper(final String queryString, Function<String, T> valueParser) { TreeMap<String, T> pieces = new TreeMap<>(); if (CoreUtils.isNullOrEmpty(queryString)) { return pieces; } for (String kvp : queryString.split("&")) { int equalIndex = kvp.indexOf("="); String key = urlDecode(kvp.substring(0, equalIndex).toLowerCase(Locale.ROOT)); T value = valueParser.apply(kvp.substring(equalIndex + 1)); pieces.putIfAbsent(key, value); } return pieces; } /** * Blocks an asynchronous response with an optional timeout. * * @param response Asynchronous response to block * @param timeout Optional timeout * @param <T> Return type of the asynchronous response * @return the value of the asynchronous response * @throws RuntimeException If the asynchronous response doesn't complete before the timeout expires. */ public static <T> T blockWithOptionalTimeout(Mono<T> response, Duration timeout) { if (timeout == null) { return response.block(); } else { return response.block(timeout); } } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Mono to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Mono. 
* @return Mono with an applied timeout, if any. */ public static <T> Mono<T> applyOptionalTimeout(Mono<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Applies a timeout to a publisher if the given timeout is not null. * * @param publisher Flux to apply optional timeout to. * @param timeout Optional timeout. * @param <T> Return type of the Flux. * @return Flux with an applied timeout, if any. */ public static <T> Flux<T> applyOptionalTimeout(Flux<T> publisher, Duration timeout) { return timeout == null ? publisher : publisher.timeout(timeout); } /** * Asserts that a value is not {@code null}. * * @param param Name of the parameter * @param value Value of the parameter * @throws NullPointerException If {@code value} is {@code null} */ public static void assertNotNull(final String param, final Object value) { if (value == null) { throw new NullPointerException(String.format(Locale.ROOT, ARGUMENT_NULL_OR_EMPTY, param)); } } /** * Asserts that the specified number is in the valid range. The range is inclusive. * * @param param Name of the parameter * @param value Value of the parameter * @param min The minimum allowed value * @param max The maximum allowed value * @throws IllegalArgumentException If {@code value} is less than {@code min} or {@code value} is greater than * {@code max}. */ public static void assertInBounds(final String param, final long value, final long min, final long max) { if (value < min || value > max) { throw LOGGER.logExceptionAsError(new IllegalArgumentException(String.format(Locale.ROOT, PARAMETER_NOT_IN_RANGE, param, min, max))); } } /** * Computes a signature for the specified string using the HMAC-SHA256 algorithm. 
* * @param base64Key Base64 encoded key used to sign the string * @param stringToSign UTF-8 encoded string to sign * @return the HMAC-SHA256 encoded signature * @throws RuntimeException If the HMAC-SHA256 algorithm isn't support, if the key isn't a valid Base64 encoded * string, or the UTF-8 charset isn't supported. */ public static String computeHMac256(final String base64Key, final String stringToSign) { try { byte[] key = Base64.getDecoder().decode(base64Key); Mac hmacSHA256 = Mac.getInstance("HmacSHA256"); hmacSHA256.init(new SecretKeySpec(key, "HmacSHA256")); byte[] utf8Bytes = stringToSign.getBytes(StandardCharsets.UTF_8); return Base64.getEncoder().encodeToString(hmacSHA256.doFinal(utf8Bytes)); } catch (NoSuchAlgorithmException | InvalidKeyException ex) { throw new RuntimeException(ex); } } /** * Appends a string to the end of the passed URL's path. * * @param baseURL URL having a path appended * @param name Name of the path * @return a URL with the path appended. * @throws IllegalArgumentException If {@code name} causes the URL to become malformed. */ public static URL appendToUrlPath(String baseURL, String name) { UrlBuilder builder = UrlBuilder.parse(baseURL); if (builder.getPath() == null) { builder.setPath("/"); } else if (!builder.getPath().endsWith("/")) { builder.setPath(builder.getPath() + "/"); } builder.setPath(builder.getPath() + name); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the last path segment from the passed URL. * * @param baseUrl URL having its last path segment stripped * @return a URL with the path segment stripped. * @throws IllegalArgumentException If stripping the last path segment causes the URL to become malformed or it * doesn't contain any path segments. 
*/ public static URL stripLastPathSegment(URL baseUrl) { UrlBuilder builder = UrlBuilder.parse(baseUrl); if (builder.getPath() == null || !builder.getPath().contains("/")) { throw new IllegalArgumentException(String.format(Locale.ROOT, NO_PATH_SEGMENTS, baseUrl)); } builder.setPath(builder.getPath().substring(0, builder.getPath().lastIndexOf("/"))); try { return builder.toUrl(); } catch (MalformedURLException ex) { throw new IllegalArgumentException(ex); } } /** * Strips the account name from host part of the URL object. * * @param url URL having its hostanme * @return account name. */ public static String getAccountName(URL url) { UrlBuilder builder = UrlBuilder.parse(url); String accountName = null; String host = builder.getHost(); if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } return accountName; } /** Returns an empty string if value is {@code null}, otherwise returns value * @param value The value to check and return. * @return The value or empty string. */ public static String emptyIfNull(String value) { return value == null ? "" : value; } /** * Reads data from an input stream and writes it to an output stream. * @param source {@link InputStream source} * @param writeLength The length of data to write. * @param destination {@link OutputStream destination} * @throws IOException If an I/O error occurs. 
*/ public static void copyToOutputStream(InputStream source, long writeLength, OutputStream destination) throws IOException { StorageImplUtils.assertNotNull("source", source); StorageImplUtils.assertNotNull("destination", destination); final byte[] retrievedBuff = new byte[Constants.BUFFER_COPY_LENGTH]; int nextCopy = (int) Math.min(retrievedBuff.length, writeLength); int count = source.read(retrievedBuff, 0, nextCopy); while (nextCopy > 0 && count != -1) { destination.write(retrievedBuff, 0, count); nextCopy = (int) Math.min(retrievedBuff.length, writeLength); count = source.read(retrievedBuff, 0, nextCopy); } } /** * Logs the string to sign if a valid context is provided. * * @param logger {@link ClientLogger} * @param stringToSign The string to sign to log. * @param context Additional context to determine if the string to sign should be logged. */ /** * Converts the storage exception message. * * @param message The storage exception message * @param response The storage service response. * @return The converted storage exception message. */ public static String convertStorageExceptionMessage(String message, HttpResponse response) { if (response != null) { if (response.getStatusCode() == 403) { return STORAGE_EXCEPTION_LOG_STRING_TO_SIGN_MESSAGE + message; } if (response.getRequest() != null && response.getRequest().getHttpMethod() != null && response.getRequest().getHttpMethod().equals(HttpMethod.HEAD) && response.getHeaders().getValue(ERROR_CODE) != null) { return message.replaceFirst("(empty body)", response.getHeaders().getValue(ERROR_CODE)); } } return message; } }
serializing diagnostics is very expensive and will have perf impact. Now that we are doing that in the exception stacktrace for each exception we should exclude the business logic errors: - NotFound (with no substatus code, i.e., for READ_SESSION_NOT_AVAILABLE we need diagnostics), - Conflict, - PreconditionFailed. These are business logic failures we don't need to the diagnostics for this. NotFound with READ_SESSION_NOT_AVAILABLE substatus code is a consistency failure not business logic we should have diagnostics for that.
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null);
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
Before this PR we were still serializing diagnostics and putting in string on exception message. In this PR we putting diagnostics object in exception parent ObjectNode and then serializing in json at the last. So for computation on diagnostics serialization still remain the same. However I see your point in general to avoid diagnostics serialization on regular customer business scenario like 404 not found. I would like to keep it as sperate work item, however there is down side to put custom logic based on status code, we need to maintain extra logic for future scenarios and secondly its customer who should decide whether to fetch the whole message when there is expected failure and can be handle based on expected status code. Thoughts?
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null);
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
We should exclude the business logic failure (404, 409, 412) and not include in the exception due to the perf impact. Feel free to capture this work in a separate PR.
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null);
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
https://github.com/Azure/azure-sdk-for-java/issues/18271
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null);
public String toString() { try { ObjectNode exceptionMessageNode = mapper.createObjectNode(); exceptionMessageNode.put("ClassName", getClass().getSimpleName()); exceptionMessageNode.put(USER_AGENT_KEY, USER_AGENT); exceptionMessageNode.put("statusCode", statusCode); exceptionMessageNode.put("resourceAddress", resourceAddress); if (cosmosError != null) { exceptionMessageNode.put("error", cosmosError.toJson()); } exceptionMessageNode.put("innerErrorMessage", innerErrorMessage()); exceptionMessageNode.put("causeInfo", causeInfo()); if (responseHeaders != null) { exceptionMessageNode.put("responseHeaders", responseHeaders.toString()); } List<Map.Entry<String, String>> filterRequestHeaders = filterSensitiveData(requestHeaders); if (filterRequestHeaders != null) { exceptionMessageNode.put("requestHeaders", filterRequestHeaders.toString()); } if(this.cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(exceptionMessageNode, null); } return mapper.writeValueAsString(exceptionMessageNode); } catch (JsonProcessingException ex) { return getClass().getSimpleName() + "{" + USER_AGENT_KEY +"=" + USER_AGENT + ", error=" + cosmosError + ", " + "resourceAddress='" + resourceAddress + ", statusCode=" + statusCode + ", message=" + getMessage() + ", causeInfo=" + causeInfo() + ", responseHeaders=" + responseHeaders + ", requestHeaders=" + filterSensitiveData(requestHeaders) + '}'; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
class CosmosException extends AzureException { private static final long serialVersionUID = 1L; private static final ObjectMapper mapper = Utils.getSimpleObjectMapper(); private final static String USER_AGENT = Utils.getUserAgent(); private final int statusCode; private final Map<String, String> responseHeaders; private CosmosDiagnostics cosmosDiagnostics; private RequestTimeline requestTimeline; private CosmosError cosmosError; private int rntbdChannelTaskQueueSize; private RntbdEndpointStatistics rntbdEndpointStatistics; long lsn; String partitionKeyRangeId; Map<String, String> requestHeaders; Uri requestUri; String resourceAddress; private int requestPayloadLength; private int rntbdPendingRequestQueueSize; private int rntbdRequestLength; private int rntbdResponseLength; private boolean sendingRequestHasStarted; protected CosmosException(int statusCode, String message, Map<String, String> responseHeaders, Throwable cause) { super(message, cause); this.statusCode = statusCode; this.responseHeaders = responseHeaders == null ? new HashMap<>() : new HashMap<>(responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param errorMessage the error message. */ protected CosmosException(int statusCode, String errorMessage) { this(statusCode, errorMessage, null, null); this.cosmosError = new CosmosError(); ModelBridgeInternal.setProperty(cosmosError, Constants.Properties.MESSAGE, errorMessage); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param innerException the original exception. */ protected CosmosException(int statusCode, Exception innerException) { this(statusCode, null, null, innerException); } /** * Creates a new instance of the CosmosException class. * * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. 
* @param responseHeaders the response headers. */ protected CosmosException(int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(/* resourceAddress */ null, statusCode, cosmosErrorResource, responseHeaders); } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, null); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param resourceAddress the address of the resource the request is associated with. * @param statusCode the http status code of the response. * @param cosmosErrorResource the error resource object. * @param responseHeaders the response headers. * @param cause the inner exception */ protected CosmosException(String resourceAddress, int statusCode, CosmosError cosmosErrorResource, Map<String, String> responseHeaders, Throwable cause) { this(statusCode, cosmosErrorResource == null ? null : cosmosErrorResource.getMessage(), responseHeaders, cause); this.resourceAddress = resourceAddress; this.cosmosError = cosmosErrorResource; } /** * Creates a new instance of the CosmosException class. * * @param message the string message. * @param statusCode the http status code of the response. * @param exception the exception object. * @param responseHeaders the response headers. * @param resourceAddress the address of the resource the request is associated with. 
*/ protected CosmosException(String message, Exception exception, Map<String, String> responseHeaders, int statusCode, String resourceAddress) { this(statusCode, message, responseHeaders, exception); this.resourceAddress = resourceAddress; } @Override public String getMessage() { try { ObjectNode messageNode = mapper.createObjectNode(); messageNode.put("innerErrorMessage", innerErrorMessage()); if (cosmosDiagnostics != null) { cosmosDiagnostics.fillCosmosDiagnostics(messageNode, null); } return mapper.writeValueAsString(messageNode); } catch (JsonProcessingException e) { if (cosmosDiagnostics == null) { return innerErrorMessage(); } return innerErrorMessage() + ", " + cosmosDiagnostics.toString(); } } /** * Gets the activity ID associated with the request. * * @return the activity ID. */ public String getActivityId() { if (this.responseHeaders != null) { return this.responseHeaders.get(HttpConstants.HttpHeaders.ACTIVITY_ID); } return null; } /** * Gets the http status code. * * @return the status code. */ public int getStatusCode() { return this.statusCode; } /** * Gets the sub status code. * * @return the status code. */ public int getSubStatusCode() { int code = HttpConstants.SubStatusCodes.UNKNOWN; if (this.responseHeaders != null) { String subStatusString = this.responseHeaders.get(HttpConstants.HttpHeaders.SUB_STATUS); if (StringUtils.isNotEmpty(subStatusString)) { try { code = Integer.parseInt(subStatusString); } catch (NumberFormatException e) { } } } return code; } void setSubStatusCode(int subStatusCode) { this.responseHeaders.put(HttpConstants.HttpHeaders.SUB_STATUS, Integer.toString(subStatusCode)); } /** * Gets the error code associated with the exception. * * @return the error. 
*/ CosmosError getError() { return this.cosmosError; } void setError(CosmosError cosmosError) { this.cosmosError = cosmosError; } /** * Gets the recommended time duration after which the client can retry failed * requests * * @return the recommended time duration after which the client can retry failed * requests. */ public Duration getRetryAfterDuration() { long retryIntervalInMilliseconds = 0; if (this.responseHeaders != null) { String header = this.responseHeaders.get(HttpConstants.HttpHeaders.RETRY_AFTER_IN_MILLISECONDS); if (StringUtils.isNotEmpty(header)) { try { retryIntervalInMilliseconds = Long.parseLong(header); } catch (NumberFormatException e) { } } } return Duration.ofMillis(retryIntervalInMilliseconds); } /** * Gets the response headers as key-value pairs * * @return the response headers */ public Map<String, String> getResponseHeaders() { return this.responseHeaders; } /** * Gets the resource address associated with this exception. * * @return the resource address associated with this exception. */ String getResourceAddress() { return this.resourceAddress; } /** * Gets the Cosmos Diagnostic Statistics associated with this exception. * * @return Cosmos Diagnostic Statistics associated with this exception. */ public CosmosDiagnostics getDiagnostics() { return cosmosDiagnostics; } CosmosException setDiagnostics(CosmosDiagnostics cosmosDiagnostics) { this.cosmosDiagnostics = cosmosDiagnostics; return this; } /** * Gets the request charge as request units (RU) consumed by the operation. * <p> * For more information about the RU and factors that can impact the effective charges please visit * <a href="https: * * @return the request charge. 
*/ public double getRequestCharge() { String value = this.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (StringUtils.isEmpty(value)) { return 0; } return Double.valueOf(value); } @Override String innerErrorMessage() { String innerErrorMessage = super.getMessage(); if (cosmosError != null) { innerErrorMessage = cosmosError.getMessage(); if (innerErrorMessage == null) { innerErrorMessage = String.valueOf( ModelBridgeInternal.getObjectFromJsonSerializable(cosmosError, "Errors")); } } return innerErrorMessage; } private String causeInfo() { Throwable cause = getCause(); if (cause != null) { return String.format("[class: %s, message: %s]", cause.getClass(), cause.getMessage()); } return null; } private List<Map.Entry<String, String>> filterSensitiveData(Map<String, String> requestHeaders) { if (requestHeaders == null) { return null; } return requestHeaders.entrySet().stream().filter(entry -> !HttpConstants.HttpHeaders.AUTHORIZATION.equalsIgnoreCase(entry.getKey())) .collect(Collectors.toList()); } RequestTimeline getRequestTimeline() { return this.requestTimeline; } void setRequestTimeline(RequestTimeline requestTimeline) { this.requestTimeline = requestTimeline; } void setResourceAddress(String resourceAddress) { this.resourceAddress = resourceAddress; } void setRntbdServiceEndpointStatistics(RntbdEndpointStatistics rntbdEndpointStatistics) { this.rntbdEndpointStatistics = rntbdEndpointStatistics; } RntbdEndpointStatistics getRntbdServiceEndpointStatistics() { return this.rntbdEndpointStatistics; } void setRntbdRequestLength(int rntbdRequestLength) { this.rntbdRequestLength = rntbdRequestLength; } int getRntbdRequestLength() { return this.rntbdRequestLength; } void setRntbdResponseLength(int rntbdResponseLength) { this.rntbdResponseLength = rntbdResponseLength; } int getRntbdResponseLength() { return this.rntbdResponseLength; } void setRequestPayloadLength(int requestBodyLength) { this.requestPayloadLength = requestBodyLength; } int 
getRequestPayloadLength() { return this.requestPayloadLength; } boolean hasSendingRequestStarted() { return this.sendingRequestHasStarted; } void setSendingRequestHasStarted(boolean hasSendingRequestStarted) { this.sendingRequestHasStarted = hasSendingRequestStarted; } int getRntbdChannelTaskQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdChannelTaskQueueSize(int rntbdChannelTaskQueueSize) { this.rntbdChannelTaskQueueSize = rntbdChannelTaskQueueSize; } int getRntbdPendingRequestQueueSize() { return this.rntbdChannelTaskQueueSize; } void setRntbdPendingRequestQueueSize(int rntbdPendingRequestQueueSize) { this.rntbdPendingRequestQueueSize = rntbdPendingRequestQueueSize; } }
Could this also be `ContainerNotFound`? If the container itself does not exist, the service returns `BlobErrorCode.CONTAINER_NOT_FOUND` rather than `BLOB_NOT_FOUND`, so this check would rethrow the raw `IOException` instead of mapping it to `NoSuchFileException` — consider handling that error code here as well.
public void checkAccess(Path path, AccessMode... accessModes) throws IOException { if (accessModes != null && accessModes.length != 0) { throw logger.logThrowableAsError( new AccessDeniedException("The access cannot be determined.")); } AzurePath.ensureFileSystemOpen(path); try { readAttributes(path, BasicFileAttributes.class); } catch(IOException e) { if (e.getCause() != null && e.getCause() instanceof BlobStorageException && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) e.getCause()).getErrorCode())) { throw logger.logThrowableAsError(new NoSuchFileException(path.toString())); } else { throw e; } } }
&& BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) e.getCause()).getErrorCode())) {
public void checkAccess(Path path, AccessMode... accessModes) throws IOException { if (accessModes != null && accessModes.length != 0) { throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined.")); } AzurePath.ensureFileSystemOpen(path); /* Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation and cannot be deleted by the file system. Thus, we prefer a short circuit for roots. */ if (path instanceof AzurePath && ((AzurePath) path).isRoot()) { return; } try { readAttributes(path, BasicFileAttributes.class); } catch (IOException e) { Throwable cause = e.getCause(); if (cause instanceof BlobStorageException && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } else { throw LoggingUtility.logError(logger, e); } } }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw logger.logThrowableAsError( new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
 *
     * @param path path
     * @param path1 path
     * @param copyOptions options
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @param path1 path
     * @return whether the two paths locate the same file
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Always returns false as hidden files are not supported.
     *
     * @param path the path
     * @return false
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        // Blob storage has no notion of hidden files.
        return false;
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @return the file store where the file is stored.
     * @throws UnsupportedOperationException Operation is not supported.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Checks the existence, and optionally the accessibility, of a file.
     * <p>
     * This method may only be used to check the existence of a file. It is not possible to determine the permissions
     * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be
     * thrown.
     *
     * @param path the path to the file to check
     * @param accessModes The access modes to check; may have zero elements
     * @throws NoSuchFileException if a file does not exist
     * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be
     * determined because the Java virtual machine has insufficient privileges or other reasons
     * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
     */
    @Override
    // NOTE(review): the checkAccess implementation body appears to have been truncated from this extraction --
    // verify against the upstream source before relying on this region.
    /**
     * Returns a file attribute view of a given type.
     * <p>
     * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information.
     * <p>
     * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@link
     *
     * @param path the path to the file
     * @param type the Class object corresponding to the file attribute view
     * @param linkOptions ignored
     * @return a file attribute view of the specified type, or null if the attribute view type is not available
     */
    @Override
    @SuppressWarnings("unchecked")
    public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) {
        /*
        No resource validation is necessary here. That can happen at the time of making a network requests internal to
        the view object.
         */
        if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) {
            return (V) new AzureBasicFileAttributeView(path);
        } else if (type == AzureBlobFileAttributeView.class) {
            return (V) new AzureBlobFileAttributeView(path);
        } else {
            // Contract: unknown view types yield null rather than an exception.
            return null;
        }
    }

    /**
     * Reads a file's attributes as a bulk operation.
     * <p>
     * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
     * <p>
     * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@link
     *
     * @param path the path to the file
     * @param type the Class of the file attributes required to read
     * @param linkOptions ignored
     * @return the file attributes
     * @throws UnsupportedOperationException if an attributes of the given type are not supported
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    @SuppressWarnings("unchecked")
    public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
        throws IOException {
        AzurePath.ensureFileSystemOpen(path);
        Class<?
 extends BasicFileAttributeView> view;
        // Map the requested attributes type to the view that can produce it; "basic" funnels to azureBasic.
        if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
            view = AzureBasicFileAttributeView.class;
        } else if (type == AzureBlobFileAttributes.class) {
            view = AzureBlobFileAttributeView.class;
        } else {
            throw new UnsupportedOperationException();
        }

        /*
        Resource validation will happen in readAttributes of the view. We don't want to double check, and checking
        internal to the view ensures it is always checked no matter which code path is taken.
         */
        return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
    }

    /**
     * Reads a set of file attributes as a bulk operation.
     * <p>
     * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
     * <p>
     * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@link
     *
     * @param path the path to the file
     * @param attributes the attributes to read
     * @param linkOptions ignored
     * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are
     * the attribute values
     * @throws UnsupportedOperationException if an attributes of the given type are not supported
     * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions)
        throws IOException {
        if (attributes == null) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null."));
        }
        AzurePath.ensureFileSystemOpen(path);

        Map<String, Object> results = new HashMap<>();

        /*
        AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate
        one of each if both are specified somewhere in the list as that will waste a network call.
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
 * @throws SecurityException never
     */
    @Override
    public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException {
        AzurePath.ensureFileSystemOpen(path);
        String viewType;
        String attributeName;
        String[] parts = attributes.split(":");
        if (parts.length > 2) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Invalid format for attribute string: " + attributes));
        }
        if (parts.length == 1) {
            viewType = "basic"; // No view prefix implies the "basic" view per the javadoc.
            attributeName = attributes;
        } else {
            viewType = parts[0];
            attributeName = parts[1];
        }

        /*
        For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
        state that "basic" must be supported, so we funnel to azureBasic.
         */
        if (viewType.equals("basic")) {
            viewType = AzureBasicFileAttributeView.NAME;
        }

        // The basic view is read-only in this provider, so any set through it is invalid.
        if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType
                + ". Attribute: " + attributeName));
        } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) {
            Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers(
                this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions));
            if (!attributeConsumers.containsKey(attributeName)) {
                throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                    + viewType + ". Attribute: " + attributeName));
            }
            try {
                attributeConsumers.get(attributeName).accept(value);
            } catch (UncheckedIOException e) {
                // NOTE(review): an UncheckedIOException whose message is NOT ATTR_CONSUMER_ERROR is silently
                // swallowed here -- confirm this is intentional against the upstream source.
                if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) {
                    throw LoggingUtility.logError(logger, e.getCause());
                }
            }
        } else {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Invalid attribute view: " + viewType));
        }
    }

    // Removes the given file system from the provider's cache; invoked by AzureFileSystem on close.
    void closeFileSystem(String fileSystemName) {
        this.openFileSystems.remove(fileSystemName);
    }

    // Extracts the storage account name from the "account=" query parameter of an azb: URI.
    // NOTE(review): several string literals below appear truncated by extraction (cut at \"azb:) -- verify the full
    // messages against the upstream source.
    private String extractAccountName(URI uri) {
        if (!uri.getScheme().equals(this.getScheme())) {
            throw LoggingUtility.logError(this.logger, new IllegalArgumentException(
                "URI scheme does not match this provider"));
        }
        if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
            throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
                + "component. FileSystems require a URI of the format \"azb:
        }

        String accountName = Flux.fromArray(uri.getQuery().split("&"))
            .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
            .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException(
                "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
                    + "of the format \"azb:
            .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
            .blockLast();

        if (CoreUtils.isNullOrEmpty(accountName)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI"
                + " query."));
        }

        return accountName;
    }
}
class AzureFileSystemProvider extends FileSystemProvider {
    // NOTE(review): per-instance logger; the codebase's newer convention is a private static final LOGGER -- confirm
    // before changing, as every method in this class references "logger".
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query parameter key identifying the storage account in an azb: URI.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a server-side blob copy to complete before failing.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Cache of open file systems keyed by account name; concurrent because providers are shared across threads.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb".}
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb:
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are
     * met at which time they are sent to the service. When the write method returns, there is no guarantee about which
     * phase of this process the data is in other than it has been accepted and will be written. Again, closing will
     * guarantee that the data is written and available.
     * <p>
     * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
     * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write
     * may not otherwise be thrown unless the stream is flushed, closed, or written to again.
     *
     * @param path the path to the file to open or create
     * @param options options specifying how the file is opened
     * @return a new output stream
     * @throws IllegalArgumentException if an invalid combination of options is specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
        // No options means the default CREATE + WRITE + TRUNCATE_EXISTING combination.
        if (options == null || options.length == 0) {
            options = new OpenOption[] {
                StandardOpenOption.CREATE,
                StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING };
        }
        List<OpenOption> optionsList = Arrays.asList(options);

        // Reject any option outside the supported set before touching the service.
        List<OpenOption> supportedOptions = Arrays.asList(
            StandardOpenOption.CREATE_NEW,
            StandardOpenOption.CREATE,
            StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING);
        for (OpenOption option : optionsList) {
            if (!supportedOptions.contains(option)) {
                throw LoggingUtility.logError(logger,
                    new UnsupportedOperationException("Unsupported option: " + option.toString()));
            }
        }

        // Files can only be overwritten completely, so WRITE and TRUNCATE_EXISTING are mandatory.
        if (!optionsList.contains(StandardOpenOption.WRITE)
            || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
        }

        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());
        DirectoryStatus status = resource.checkDirStatus();
        if (DirectoryStatus.isDirectory(status)) {
            throw LoggingUtility.logError(logger,
                new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
            || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
                + "option. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
                + "CREATE_NEW was specified. Path: " + path.toString()));
        }

        // Transfer tuning comes from the file system's configuration; nulls fall back to client defaults.
        AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
        Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
        Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
        ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
            putBlobThreshold);

        // CREATE_NEW demands the blob not already exist; enforce atomically with If-None-Match: *.
        BlobRequestConditions rq = null;
        if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            rq = new BlobRequestConditions().setIfNoneMatch("*");
        }

        return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null,
            null, null, rq), resource.getPath());
    }

    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by
     * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path
     * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the
     * iterator are filtered by the given filter.
     * <p>
     * When not using the try-with-resources construct, then directory stream's close method should be invoked after
     * iteration is completed so as to free any resources held for the open directory.
     * <p>
     * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or
     * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
     * DirectoryIteratorException with the IOException as the cause.
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<?
 super Path> filter) throws IOException {
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);

        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
        caught in instantiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside the
        stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }

        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
     * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
     * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
     * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
     * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
     * blob whose name is the directory path with a particular metadata field indicating the blob's status as a
     * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
     * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
     * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
     * prefix.
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
 * <p>
 * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
 * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
 * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
 * When extracting the content headers, the following strings will be used for comparison (constants for these
 * values can be found on this type):
 * <ul>
 * <li>{@code Content-Type}</li>
 * <li>{@code Content-Disposition}</li>
 * <li>{@code Content-Language}</li>
 * <li>{@code Content-Encoding}</li>
 * <li>{@code Content-MD5}</li>
 * <li>{@code Cache-Control}</li>
 * </ul>
 * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
 * words, if any of the above is set, all those that are not set will be cleared. See the
 * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a> for more
 * information.
 *
 * @param path the directory to create
 * @param fileAttributes an optional list of file attributes to set atomically when creating the directory
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when
 * creating the directory
 * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name
 * already exists
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    // Normalize a null varargs array to empty so the attribute list below is always valid.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

    AzureResource azureResource = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(azureResource.getPath());

    // The parent is only checked for weak (virtual) existence; per the contract above, this check is NOT atomic
    // with the creation below.
    if (azureResource.checkParentDirectoryExists()) {
        try {
            // setIfNoneMatch("*") makes the existence check and the creation a single atomic operation on the
            // service side, satisfying the nio atomicity requirement for the child resource.
            azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // 409 + BlobAlreadyExists means something strongly exists at this path already.
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw LoggingUtility.logError(logger,
                    new FileAlreadyExistsException(azureResource.getPath().toString()));
            } else {
                throw LoggingUtility.logError(logger,
                    new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
            + azureResource.getPath().toString()));
    }
}

/**
 * Deletes the specified resource.
 * <p>
 * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by
 * another process, and doing so will not immediately invalidate any channels open to that file--they will simply
 * start to fail. Root directories cannot be deleted even when empty.
 *
 * @param path the path to the file to delete
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 * @throws NoSuchFileException if the file does not exist
 * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the
 * directory is not empty
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public void delete(Path path) throws IOException {
    AzureResource azureResource = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(azureResource.getPath());

    // Validate the resource's state before issuing the delete: it must exist and, if a directory, be empty.
    DirectoryStatus dirStatus = azureResource.checkDirStatus();
    if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
        throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
    }
    if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString()));
    }

    try {
        azureResource.getBlobClient().delete();
    } catch (BlobStorageException e) {
        // The resource may have been deleted between the status check above and this call; surface that as the
        // nio NoSuchFileException rather than a generic IOException.
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, new IOException(e));
    }
}

/**
 * Copies the resource at the source location to the destination.
 * <p>
 * This method is not atomic with respect to other file system operations. More specifically, the checks necessary
 * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy
 * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination.
 * <p>
 * In addition to those in the docs for {@link FileSystemProvider#copy(Path, Path, CopyOption...)}, this method has
 * the following requirements for successful completion. {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed
 * as it is impossible not to copy blob properties; if this option is not passed, an
 * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root
 * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent
 * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown.
 * The only supported option other than {@link StandardCopyOption#COPY_ATTRIBUTES} is
 * {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any other option will result in an
 * {@link UnsupportedOperationException}.
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
 *
 * @param path path
 * @param path1 path
 * @param copyOptions options
 * @throws UnsupportedOperationException Operation is not supported.
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // Not supported by this provider.
    throw LoggingUtility.logError(logger, new UnsupportedOperationException());
}

/**
 * Unsupported.
 *
 * @param path path
 * @param path1 path
 * @throws UnsupportedOperationException Operation is not supported.
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    // Not supported by this provider.
    throw LoggingUtility.logError(logger, new UnsupportedOperationException());
}

/**
 * Always returns false as hidden files are not supported.
 *
 * @param path the path
 * @return false
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public boolean isHidden(Path path) throws IOException {
    return false;
}

/**
 * Unsupported.
 *
 * @param path path
 * @return the file store where the file is stored.
 * @throws UnsupportedOperationException Operation is not supported.
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    // Not supported by this provider.
    throw LoggingUtility.logError(logger, new UnsupportedOperationException());
}

/**
 * Checks the existence, and optionally the accessibility, of a file.
 * <p>
 * This method may only be used to check the existence of a file. It is not possible to determine the permissions
 * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be
 * thrown.
 * <p>
 * NOTE(review): the implementations of this method in this file throw {@link java.nio.file.AccessDeniedException}
 * (not UnsupportedOperationException) when access modes are specified -- the javadoc and the code disagree; confirm
 * which is intended.
 *
 * @param path the path to the file to check
 * @param accessModes The access modes to check; may have zero elements
 * @throws NoSuchFileException if a file does not exist
 * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be
 * determined because the Java virtual machine has insufficient privileges or other reasons
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
// NOTE(review): a method body appears to be missing between this annotation and the javadoc below (likely
// checkAccess, given the preceding javadoc) -- confirm against the original file.
/**
 * Returns a file attribute view of a given type.
 * <p>
 * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information.
 * <p>
 * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See
 * {@link
 *
 * @param path the path to the file
 * @param type the Class object corresponding to the file attribute view
 * @param linkOptions ignored
 * @return a file attribute view of the specified type, or null if the attribute view type is not available
 */
@Override
@SuppressWarnings("unchecked")
public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) {
    /*
    No resource validation is necessary here. That can happen at the time of making a network requests internal to
    the view object.
     */
    if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) {
        return (V) new AzureBasicFileAttributeView(path);
    } else if (type == AzureBlobFileAttributeView.class) {
        return (V) new AzureBlobFileAttributeView(path);
    } else {
        // Per the FileSystemProvider contract, unsupported view types yield null rather than an exception.
        return null;
    }
}

/**
 * Reads a file's attributes as a bulk operation.
 * <p>
 * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
 * <p>
 * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See
 * {@link
 *
 * @param path the path to the file
 * @param type the Class of the file attributes required to read
 * @param linkOptions ignored
 * @return the file attributes
 * @throws UnsupportedOperationException if an attributes of the given type are not supported
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
@SuppressWarnings("unchecked")
public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
    throws IOException {
    AzurePath.ensureFileSystemOpen(path);

    // Map the requested attributes type to the view type able to produce it.
    Class<? extends BasicFileAttributeView> view;
    if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
        view = AzureBasicFileAttributeView.class;
    } else if (type == AzureBlobFileAttributes.class) {
        view = AzureBlobFileAttributeView.class;
    } else {
        throw LoggingUtility.logError(logger, new UnsupportedOperationException());
    }

    /*
    Resource validation will happen in readAttributes of the view. We don't want to double check, and checking
    internal to the view ensures it is always checked no matter which code path is taken.
     */
    return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
}

/**
 * Reads a set of file attributes as a bulk operation.
 * <p>
 * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
 * <p>
 * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See
 * {@link
 *
 * @param path the path to the file
 * @param attributes the attributes to read
 * @param linkOptions ignored
 * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are
 * the attribute values
 * @throws UnsupportedOperationException if an attributes of the given type are not supported
 * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions)
    throws IOException {
    if (attributes == null) {
        throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null."));
    }
    AzurePath.ensureFileSystemOpen(path);

    Map<String, Object> results = new HashMap<>();

    /*
    AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate
    one of each if both are specified somewhere in the list as that will waste a network call. This can be
    generified later if we need to add more attribute types, but for now we can stick to just caching the supplier
    for a single attributes object.
     */
    Map<String, Supplier<Object>> attributeSuppliers = null;
    String viewType;
    String attributeList;

    // The attribute string is "view:attrList" or just "attrList" (implying the "basic" view).
    String[] parts = attributes.split(":");
    if (parts.length > 2) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Invalid format for attribute string: " + attributes));
    }
    if (parts.length == 1) {
        viewType = "basic";
        attributeList = attributes;
    } else {
        viewType = parts[0];
        attributeList = parts[1];
    }

    /*
    For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
    state that "basic" must be supported, so we funnel to azureBasic.
     */
    if (viewType.equals("basic")) {
        viewType = AzureBasicFileAttributeView.NAME;
    }
    if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) {
        throw LoggingUtility.logError(logger,
            new UnsupportedOperationException("Invalid attribute view: " + viewType));
    }

    for (String attributeName : attributeList.split(",")) {
        /*
        We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we
        should at least validate that the attribute is available on a basic view.
         */
        if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
            if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName)
                && !attributeName.equals("*")) {
                throw LoggingUtility.logError(logger,
                    new IllegalArgumentException("Invalid attribute. View: " + viewType
                        + ". Attribute: " + attributeName));
            }
        }
        // Lazily fetch the full attributes object once; it backs all requested attributes (one network call).
        if (attributeSuppliers == null) {
            attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers(
                this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions));
        }
        if (attributeName.equals("*")) {
            Set<String> attributesToAdd; // NOTE(review): declared but never used -- candidate for removal.
            if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
                for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) {
                    results.put(attr, attributeSuppliers.get(attr).get());
                }
            } else {
                // "*" on the blob view returns every supplier-backed attribute.
                for (Map.Entry<String, Supplier<Object>> entry : attributeSuppliers.entrySet()) {
                    results.put(entry.getKey(), entry.getValue().get());
                }
            }
        } else if (!attributeSuppliers.containsKey(attributeName)) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Invalid attribute. View: " + viewType
                    + ". Attribute: " + attributeName));
        } else {
            results.put(attributeName, attributeSuppliers.get(attributeName).get());
        }
    }

    // An empty attribute list (e.g. "basic:") is a caller error, not an empty-result case.
    if (results.isEmpty()) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("No attributes were specified. Attributes: " + attributes));
    }
    return results;
}

/**
 * Sets the value of a file attribute.
 * <p>
 * See {@link AzureBlobFileAttributeView} for more information.
 * <p>
 * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See
 * {@link
 *
 * @param path the path to the file
 * @param attributes the attribute to set
 * @param value the attribute value
 * @param linkOptions ignored
 * @throws UnsupportedOperationException if an attribute view is not available
 * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute
 * value is of the correct type but has an inappropriate value
 * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing
 * elements that are not of the expected type
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException {
    AzurePath.ensureFileSystemOpen(path);

    String viewType;
    String attributeName;
    // Same "view:attribute" parsing as readAttributes(Path, String, LinkOption...).
    String[] parts = attributes.split(":");
    if (parts.length > 2) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Invalid format for attribute string: " + attributes));
    }
    if (parts.length == 1) {
        viewType = "basic";
        attributeName = attributes;
    } else {
        viewType = parts[0];
        attributeName = parts[1];
    }

    /*
    For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
    state that "basic" must be supported, so we funnel to azureBasic.
     */
    if (viewType.equals("basic")) {
        viewType = AzureBasicFileAttributeView.NAME;
    }

    // The basic view is read-only in this provider; only the blob view supports setting attributes.
    if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Invalid attribute. View: " + viewType
                + ". Attribute: " + attributeName));
    } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) {
        Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers(
            this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions));
        if (!attributeConsumers.containsKey(attributeName)) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Invalid attribute. View: " + viewType
                    + ". Attribute: " + attributeName));
        }
        try {
            attributeConsumers.get(attributeName).accept(value);
        } catch (UncheckedIOException e) {
            // The view signals IO failures from its consumers via a marker message; unwrap those to IOException.
            // NOTE(review): an UncheckedIOException whose message does NOT match is silently swallowed here --
            // confirm this is intentional.
            if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) {
                throw LoggingUtility.logError(logger, e.getCause());
            }
        }
    } else {
        throw LoggingUtility.logError(logger,
            new UnsupportedOperationException("Invalid attribute view: " + viewType));
    }
}

// Removes the named file system from the open set; called by AzureFileSystem on close.
void closeFileSystem(String fileSystemName) {
    this.openFileSystems.remove(fileSystemName);
}

// Extracts the account name from the "account=" query parameter of a file-system URI, validating the scheme
// and the presence of the parameter.
private String extractAccountName(URI uri) {
    if (!uri.getScheme().equals(this.getScheme())) {
        throw LoggingUtility.logError(this.logger, new IllegalArgumentException(
            "URI scheme does not match this provider"));
    }
    // NOTE(review): the string literals below appear truncated by extraction (they end at "azb:) -- restore the
    // full literals from the original source before compiling.
    if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
        throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
            + "component. FileSystems require a URI of the format \"azb:
    }
    String accountName = Flux.fromArray(uri.getQuery().split("&"))
        .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
        .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException(
            "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
                + "of the format \"azb:
        .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
        .blockLast();
    if (CoreUtils.isNullOrEmpty(accountName)) {
        throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI"
            + " query."));
    }
    return accountName;
}
}
Just a thought — should authentication/authorization failures from the service be wrapped in AccessDeniedException?
/**
 * Checks the existence of a file.
 * <p>
 * Only existence can be checked: the permissions granted to a client cannot be determined, so specifying any
 * access mode results in an AccessDeniedException.
 *
 * @param path the path to the file to check
 * @param accessModes the access modes to check; must be empty
 * @throws AccessDeniedException if any access mode is specified
 * @throws NoSuchFileException if the file does not exist
 * @throws IOException If an I/O error occurs.
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
    Roots are containers: readAttributes is not supported on them, and they are assumed to exist (verified at
    creation and not deletable through this file system), so short circuit.
     */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }

    try {
        // Existence is determined by attempting to read the resource's attributes.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        // A 404 from the service surfaces as a BlobStorageException cause; translate to the nio exception type.
        // (instanceof already rejects null, so no separate null check of the cause is needed.)
        Throwable cause = e.getCause();
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            // Log before rethrowing so every exception thrown by this provider is logged consistently.
            throw LoggingUtility.logError(logger, e);
        }
    }
}
} catch(IOException e) {
// Checks existence only; any requested access mode is rejected because client permissions cannot be determined.
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
    Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on
    roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation
    and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
     */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }

    try {
        // Existence is determined by attempting to read the resource's attributes.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        // A service-side 404 surfaces as a BlobStorageException cause; translate it to NoSuchFileException.
        Throwable cause = e.getCause();
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            throw LoggingUtility.logError(logger, e);
        }
    }
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw logger.logThrowableAsError( new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw new UnsupportedOperationException(); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw new UnsupportedOperationException(); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw new UnsupportedOperationException(); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw new UnsupportedOperationException(); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
     * @throws SecurityException never */
    @Override
    public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException {
        AzurePath.ensureFileSystemOpen(path);
        String viewType;
        String attributeName;
        // Attribute strings take the form "[view-type:]attribute-name".
        String[] parts = attributes.split(":");
        if (parts.length > 2) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Invalid format for attribute string: " + attributes));
        }
        if (parts.length == 1) {
            // No view qualifier given; defaults to the "basic" view per the FileSystemProvider contract.
            viewType = "basic";
            attributeName = attributes;
        } else {
            viewType = parts[0];
            attributeName = parts[1];
        }

        /*
        For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
        state that "basic" must be supported, so we funnel to azureBasic.
         */
        if (viewType.equals("basic")) {
            viewType = AzureBasicFileAttributeView.NAME;
        }

        // The basic view is read-only here, so any set through it is rejected.
        if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType
                + ". Attribute: " + attributeName));
        } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) {
            // Look up the setter for this attribute on the blob view.
            Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers(
                this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions));
            if (!attributeConsumers.containsKey(attributeName)) {
                throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                    + viewType + ". Attribute: " + attributeName));
            }
            try {
                attributeConsumers.get(attributeName).accept(value);
            } catch (UncheckedIOException e) {
                // The consumer wraps IOExceptions; unwrap and rethrow the original cause when it is that marker error.
                if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) {
                    throw LoggingUtility.logError(logger, e.getCause());
                }
            }
        } else {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Invalid attribute view: " + viewType));
        }
    }

    // Removes the given file system from the open-file-systems registry; called by AzureFileSystem on close.
    void closeFileSystem(String fileSystemName) {
        this.openFileSystems.remove(fileSystemName);
    }

    // Extracts the account name from the "account=" query parameter of an azb: URI.
    // NOTE(review): the two URI-format message literals below appear truncated in this view of the file
    // (presumably "azb://?account=<account_name>" in the original source) — confirm against the repository.
    private String extractAccountName(URI uri) {
        if (!uri.getScheme().equals(this.getScheme())) {
            throw LoggingUtility.logError(this.logger, new IllegalArgumentException(
                "URI scheme does not match this provider"));
        }
        if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
            throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query "
                + "component. FileSystems require a URI of the format \"azb:
        }

        // Scan the query parameters for the "account=" key and take its value.
        String accountName = Flux.fromArray(uri.getQuery().split("&"))
            .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "="))
            .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException(
                "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI "
                    + "of the format \"azb:
            .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1))
            .blockLast();

        if (CoreUtils.isNullOrEmpty(accountName)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI"
                + " query."));
        }

        return accountName;
    }
}
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query parameter key that carries the storage account name in an azb: URI.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a server-side blob copy to finish.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Registry of file systems currently open through this provider, keyed by account name.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb".}
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb:
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter
     * does not contain properties required by the provider, or a property value is invalid.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException If the file system has already been created.
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);

        // Reject a second open of the same account; close the existing file system first.
        boolean alreadyOpen = this.openFileSystems.containsKey(accountName);
        if (alreadyOpen) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        AzureFileSystem fileSystem = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, fileSystem);
        return fileSystem;
    }

    /**
     * Returns an existing FileSystem created by this provider.
     * <p>
     * The format of a {@code URI} identifying an file system is {@code "azb:
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @return the file system
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
     * @throws FileSystemNotFoundException If the file system already exists
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);

        // ConcurrentHashMap never stores null values, so a null lookup means the system was never opened (or closed).
        FileSystem fileSystem = this.openFileSystems.get(accountName);
        if (fileSystem == null) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return fileSystem;
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already
     * exists.
     *
     * @param uri The URI to convert
     * @return The path identified by the URI.
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported option: " + option.toString())); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? 
null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
Once I add support for containers, yes. This is a good reminder that I'll have to update a lot of error handling like this.
/**
 * Checks the existence of a file.
 * <p>
 * Permissions cannot be determined for a given client, so any requested {@link AccessMode} is rejected.
 *
 * @param path the path to the file to check
 * @param accessModes the access modes to check; must be empty
 * @throws AccessDeniedException if any access mode is specified, as access cannot be determined
 * @throws NoSuchFileException if no resource exists at the given path
 * @throws IOException if an I/O error occurs while checking existence
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    if (accessModes != null && accessModes.length != 0) {
        throw logger.logThrowableAsError(
            new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);
    /*
     * Roots are verified at file system creation and cannot be deleted by this file system, so they always
     * exist. readAttributes is also not supported on roots (they are containers), so short circuit here.
     */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }
    try {
        // Existence check: reading attributes fails with a wrapped BLOB_NOT_FOUND if the resource is absent.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        // instanceof is null-safe, so no separate null check on the cause is needed.
        if (e.getCause() instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) e.getCause()).getErrorCode())) {
            throw logger.logThrowableAsError(new NoSuchFileException(path.toString()));
        } else {
            // Log before rethrowing for consistency with the rest of this provider's error handling.
            throw logger.logThrowableAsError(e);
        }
    }
}
&& BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) e.getCause()).getErrorCode())) {
/**
 * Checks that a file exists.
 * <p>
 * Because client permissions cannot be determined, requesting any {@link AccessMode} is not supported.
 *
 * @param path the path to the file to check
 * @param accessModes the access modes to check; must be empty
 * @throws AccessDeniedException if any access mode was requested
 * @throws NoSuchFileException if nothing exists at the given path
 * @throws IOException if an I/O error occurs during the existence check
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    boolean modesRequested = accessModes != null && accessModes.length != 0;
    if (modesRequested) {
        throw LoggingUtility.logError(logger,
            new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
     * Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported
     * on roots as they are containers. Furthermore, we always assume that roots exist as they are verified at
     * creation and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
     */
    boolean isRoot = path instanceof AzurePath && ((AzurePath) path).isRoot();
    if (isRoot) {
        return;
    }

    try {
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable underlying = e.getCause();
        boolean blobMissing = underlying instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) underlying).getErrorCode());
        if (blobMissing) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, e);
    }
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw logger.logThrowableAsError( new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
     * <p>
     * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
     * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
     * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
     * When extracting the content headers, the following strings will be used for comparison (constants for these
     * values can be found on this type):
     * <ul>
     * <li>{@code Content-Type}</li>
     * <li>{@code Content-Disposition}</li>
     * <li>{@code Content-Language}</li>
     * <li>{@code Content-Encoding}</li>
     * <li>{@code Content-MD5}</li>
     * <li>{@code Cache-Control}</li>
     * </ul>
     * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
     * words, if any of the above is set, all those that are not set will be cleared. See the
     * <a href="https://docs.microsoft.com/rest/api/storageservices/set-blob-properties">Azure Docs</a> for more
     * information.
     *
     * @param path the directory to create
     * @param fileAttributes an optional list of file attributes to set atomically when creating the directory
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when
     * creating the directory
     * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name
     * already exists
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
        // Normalize a null varargs array to an empty one so the attribute list below is always valid.
        fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

        AzureResource azureResource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(azureResource.getPath());

        // Only weak (virtual) existence of the parent is required. Per the method javadoc, this parent check is
        // deliberately NOT atomic with the creation of the child below.
        if (azureResource.checkParentDirectoryExists()) {
            try {
                // setIfNoneMatch("*") makes the put conditional on nothing concrete existing at this path, which
                // provides the atomic "check strong existence and create" behavior required by the nio contract.
                azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                    .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
            } catch (BlobStorageException e) {
                // A 409 Conflict with BlobAlreadyExists means the conditional create found a concrete resource
                // already at this path; surface that as the nio-standard FileAlreadyExistsException.
                if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                    && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                    throw LoggingUtility.logError(logger,
                        new FileAlreadyExistsException(azureResource.getPath().toString()));
                } else {
                    throw LoggingUtility.logError(logger,
                        new IOException("An error occurred when creating the directory", e));
                }
            }
        } else {
            throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
                + azureResource.getPath().toString()));
        }
    }

    /**
     * Deletes the specified resource.
     * <p>
     * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by
     * another process, and doing so will not immediately invalidate any channels open to that file--they will simply
     * start to fail. Root directories cannot be deleted even when empty.
     *
     * @param path the path to the file to delete
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NoSuchFileException if the file does not exist
     * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the
     * directory is not empty
     * @throws IOException If an I/O error occurs.
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
     *
     * @param path path
     * @param path1 path
     * @param copyOptions options
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        // Atomic rename has no equivalent in the Blob service; moving is therefore unsupported.
        throw new UnsupportedOperationException();
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @param path1 path
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Always returns false as hidden files are not supported.
     *
     * @param path the path
     * @return false
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        // Blob storage has no notion of hidden files, so nothing is ever reported hidden.
        return false;
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @return the file store where the file is stored.
     * @throws UnsupportedOperationException Operation is not supported.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Checks the existence, and optionally the accessibility, of a file.
     * <p>
     * This method may only be used to check the existence of a file. It is not possible to determine the permissions
     * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be
     * thrown.
     *
     * @param path the path to the file to check
     * @param accessModes The access modes to check; may have zero elements
     * @throws NoSuchFileException if a file does not exist
     * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be
     * determined because the Java virtual machine has insufficient privileges or other reasons
     * @throws IOException If an I/O error occurs.
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw new UnsupportedOperationException(); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
/**
 * The {@link FileSystemProvider} for the Azure Blob Storage-backed {@code "azb"} scheme. Serves as the
 * service-provider entry point for creating, retrieving, and operating on file systems backed by a storage account.
 */
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query-parameter key that carries the storage account name in a file-system URI.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a service-side blob copy to complete before the nio copy operation fails.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Registry of open file systems keyed by account name.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb"}.
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter
     * does not contain properties required by the provider, or a property value is invalid.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException If the file system has already been created.
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);

        // Only one open file system per account is permitted at a time.
        // NOTE(review): containsKey-then-put is not atomic even on a ConcurrentMap; two concurrent callers could
        // both pass this check — confirm whether single-threaded use is assumed here.
        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, afs);
        return afs;
    }

    /**
     * Returns an existing FileSystem created by this provider.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @return the file system
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
     * @throws FileSystemNotFoundException If no open file system exists for the given URI
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        if (!this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return this.openFileSystems.get(accountName);
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already
     * exists.
     *
     * @param uri The URI to convert
     * @return The path identified by the URI.
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are
     * met at which time they are sent to the service. When the write method returns, there is no guarantee about which
     * phase of this process the data is in other than it has been accepted and will be written. Again, closing will
     * guarantee that the data is written and available.
     * <p>
     * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
     * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write
     * may not otherwise be thrown unless the stream is flushed, closed, or written to again.
     *
     * @param path the path to the file to open or create
     * @param options options specifying how the file is opened
     * @return a new output stream
     * @throws IllegalArgumentException if an invalid combination of options is specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
        // No options means the default create/overwrite behavior.
        if (options == null || options.length == 0) {
            options = new OpenOption[] {
                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING };
        }
        List<OpenOption> optionsList = Arrays.asList(options);

        // Reject any option outside the supported set.
        List<OpenOption> supportedOptions = Arrays.asList(
            StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING);
        for (OpenOption option : optionsList) {
            if (!supportedOptions.contains(option)) {
                throw LoggingUtility.logError(logger,
                    new UnsupportedOperationException("Unsupported option: " + option.toString()));
            }
        }

        // Appending is not supported: WRITE and TRUNCATE_EXISTING are mandatory, so files are always overwritten
        // completely.
        if (!optionsList.contains(StandardOpenOption.WRITE)
            || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
        }

        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());

        // Validate the state of the destination against the create options.
        DirectoryStatus status = resource.checkDirStatus();
        if (DirectoryStatus.isDirectory(status)) {
            throw LoggingUtility.logError(logger,
                new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
            || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
                + "option. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
                + "CREATE_NEW was specified. Path: " + path.toString()));
        }

        // Pull transfer tuning (block size, put-blob threshold, concurrency) from the owning file system's config.
        AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
        Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
        Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
        ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
            putBlobThreshold);

        // CREATE_NEW maps to a conditional put: fail if anything already exists at the destination.
        BlobRequestConditions rq = null;
        if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            rq = new BlobRequestConditions().setIfNoneMatch("*");
        }

        return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null,
            null, null, rq), resource.getPath());
    }

    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by
     * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path
     * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the
     * iterator are filtered by the given filter.
     * <p>
     * When not using the try-with-resources construct, then directory stream's close method should be invoked after
     * iteration is completed so as to free any resources held for the open directory.
     * <p>
     * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or
     * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
     * DirectoryIteratorException with the IOException as the cause.
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);

        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
        caught in instantiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside the
        stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }

        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
     * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
     * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
     * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
     * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
     * blob whose name is the directory path with a particular metadata field indicating the blob's status as a
     * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
     * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
     * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
     * prefix.
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
Good question. I tend to think that debugging will be easier if issues with the underlying blob layer are presented differently from nio issues. I can see an argument that this kind of breaks the point of mapping one onto the other, but an AccessDeniedException seems like it would invite the response "well, let me go grant access", which is impossible in this case, whereas an IOException caused by a BlobStorageException leads me down the path of "something is wrong with my account or my nio configuration". What do you think?
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    /*
    There is no way to determine the permissions granted to a given client, so any request to check a
    specific access mode must be rejected rather than answered incorrectly.
     */
    if (accessModes != null && accessModes.length != 0) {
        // Use LoggingUtility.logError for consistency with the rest of this provider.
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);
    /*
    Some static utility methods in the jdk require checking access on a root. readAttributes is not
    supported on roots as they are containers. Furthermore, roots are verified at file system creation and
    cannot be deleted by this file system, so we assume they always exist and short circuit here.
     */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }
    try {
        // Existence check: reading basic attributes fails if there is no resource at the path.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable cause = e.getCause();
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            // Map a missing blob onto the nio-native NoSuchFileException.
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            // Log before rethrowing so this failure is traceable like every other error in this provider.
            throw LoggingUtility.logError(logger, e);
        }
    }
}
} catch(IOException e) {
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // Permissions granted to a client cannot be queried, so any explicit mode check is unanswerable.
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);
    /*
    Roots are containers: they are validated when the file system is created, cannot be deleted by this
    provider, and do not support readAttributes. Short circuit so jdk static utilities that probe roots
    succeed.
     */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }
    try {
        // Probing the basic attributes doubles as an existence check.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable inner = e.getCause();
        boolean blobMissing = inner instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) inner).getErrorCode());
        if (blobMissing) {
            // Translate the storage-layer 404 into the nio-native exception type.
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, e);
    }
}
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    private static final String ACCOUNT_QUERY_KEY = "account";
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Registry of open file systems keyed by account name. Concurrent so that newFileSystem,
    // getFileSystem, and file system closure may race safely across threads.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb"}.
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference identifying the account
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter
     *         does not contain properties required by the provider, or a property value is invalid.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException If the file system has already been created.
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);

        // Fast fail before paying the cost of constructing a file system.
        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);

        /*
         * putIfAbsent closes the race between the containsKey check above and the insertion: if another
         * thread registered a file system for this account in the meantime, fail rather than silently
         * replacing (and leaking) the previously registered instance.
         */
        if (this.openFileSystems.putIfAbsent(accountName, afs) != null) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        return afs;
    }

    /**
     * Returns an existing FileSystem created by this provider.
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     *
     * @param uri URI reference identifying the account
     * @return the file system
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
     * @throws FileSystemNotFoundException If the file system does not exist
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        // Single get instead of containsKey+get: atomic with respect to a concurrent close/removal.
        FileSystem fs = this.openFileSystems.get(accountName);
        if (fs == null) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return fs;
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that
     * already exists.
     *
     * @param uri The URI to convert
     * @return The path identified by the URI.
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw logger.logThrowableAsError( new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw new UnsupportedOperationException(); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw new UnsupportedOperationException(); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw new UnsupportedOperationException(); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw new UnsupportedOperationException(); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported option: " + option.toString())); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? 
null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
 * @param path path
 * @param path1 path
 * @param copyOptions options
 * @throws UnsupportedOperationException Operation is not supported.
 */
@Override
public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
    // The service offers no atomic rename; callers must copy and then delete instead.
    throw LoggingUtility.logError(logger, new UnsupportedOperationException());
}

/**
 * Unsupported.
 *
 * @param path path
 * @param path1 path
 * @throws UnsupportedOperationException Operation is not supported.
 */
@Override
public boolean isSameFile(Path path, Path path1) throws IOException {
    throw LoggingUtility.logError(logger, new UnsupportedOperationException());
}

/**
 * Always returns false as hidden files are not supported.
 *
 * @param path the path
 * @return false
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public boolean isHidden(Path path) throws IOException {
    // Blob storage has no notion of hidden files, so nothing is ever hidden.
    return false;
}

/**
 * Unsupported.
 *
 * @param path path
 * @return the file store where the file is stored.
 * @throws UnsupportedOperationException Operation is not supported.
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public FileStore getFileStore(Path path) throws IOException {
    throw LoggingUtility.logError(logger, new UnsupportedOperationException());
}

/**
 * Checks the existence, and optionally the accessibility, of a file.
 * <p>
 * This method may only be used to check the existence of a file. It is not possible to determine the permissions
 * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be
 * thrown.
 *
 * @param path the path to the file to check
 * @param accessModes The access modes to check; may have zero elements
 * @throws NoSuchFileException if a file does not exist
 * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be
 * determined because the Java virtual machine has insufficient privileges or other reasons
 * @throws IOException If an I/O error occurs.
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
// Review note: if that's the general pattern we've aimed for with nio, then this approach is fine.
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // The service offers no way to query the permissions granted to a client, so only bare
    // existence checks (an empty accessModes list) are supported.
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger,
            new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
    Some static utility methods in the jdk require checking access on a root. readAttributes is not supported on
    roots as they are containers, and roots are verified at creation and cannot be deleted by the file system, so
    they are assumed to exist and short circuited here.
    */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }

    try {
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable cause = e.getCause();
        // instanceof already implies non-null; the previous explicit null check was redundant.
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            // Translate a missing-blob service error into the nio-mandated exception type.
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            // Log before rethrowing for consistency with the rest of this provider.
            throw LoggingUtility.logError(logger, e);
        }
    }
}
} catch(IOException e) {
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // Permissions cannot be queried from the service, so any requested access mode is rejected
    // up front; only plain existence checks are supported.
    boolean modesRequested = accessModes != null && accessModes.length != 0;
    if (modesRequested) {
        throw LoggingUtility.logError(logger,
            new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
    Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported
    on roots as they are containers. Furthermore, we always assume that roots exist as they are verified at
    creation and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
    */
    boolean pathIsRoot = path instanceof AzurePath && ((AzurePath) path).isRoot();
    if (pathIsRoot) {
        return;
    }

    try {
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable inner = e.getCause();
        boolean blobMissing = inner instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) inner).getErrorCode());
        if (blobMissing) {
            // A missing blob maps to the nio-mandated NoSuchFileException.
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, e);
    }
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw logger.logThrowableAsError( new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw new UnsupportedOperationException(); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw new UnsupportedOperationException(); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw new UnsupportedOperationException(); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw new UnsupportedOperationException(); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query-string key carrying the storage account name in "azb" URIs.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a service-side blob copy to complete.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // One open file system per account name; guarded by the map's own concurrency.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb".}
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter
     * does not contain properties required by the provider, or a property value is invalid.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException If the file system has already been created.
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);

        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger,
                new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, afs);

        return afs;
    }

    /**
     * Returns an existing FileSystem created by this provider.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @return the file system
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
     * @throws FileSystemNotFoundException If the file system does not exist
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        if (!this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return this.openFileSystems.get(accountName);
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that
     * already exists.
     *
     * @param uri The URI to convert
     * @return The path identified by the URI.
     * @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on
     * the uri parameter do not hold
     * @throws FileSystemNotFoundException if the file system identified by the query does not exist
     * @throws SecurityException never
     * @see #getFileSystem(URI)
     */
    @Override
    public Path getPath(URI uri) {
        return getFileSystem(uri).getPath(uri.getPath());
    }

    /**
     * Unsupported. Use {@link #newInputStream(Path, OpenOption...)} or
     * {@link #newOutputStream(Path, OpenOption...)} instead.
     *
     * @param path the Path
     * @param set open options
     * @param fileAttributes attributes
     * @return a new seekable byte channel
     * @throws UnsupportedOperationException Operation is not supported.
     * @throws IllegalArgumentException if the set contains an invalid combination of options
     * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is
     * specified (optional specific exception)
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
        FileAttribute<?>... fileAttributes) throws IOException {
        throw LoggingUtility.logError(logger, new UnsupportedOperationException());
    }

    /**
     * Opens an {@link InputStream} to the given path.
     * <p>
     * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always
     * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are
     * supported.
     * <p>
     * Only {@link StandardOpenOption#READ} is supported.
     *
     * @param path the path to the file to open
     * @param options options specifying how the file is opened
     * @return a new input stream
     * @throws IllegalArgumentException if an invalid combination of options is specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public InputStream newInputStream(Path path, OpenOption... options) throws IOException {
        // Validate options. Only READ is supported.
        if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Only the read option is supported."));
        }

        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());

        // Ensure the path points to a file, not a (possibly virtual) directory.
        if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) {
            // Message fix: added the missing space between the two concatenated sentences.
            throw LoggingUtility.logError(logger,
                new IOException("Path either does not exist or points to a directory. "
                    + "Path must point to a file. Path: " + path.toString()));
        }

        return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath());
    }

    /**
     * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob.
     * <p>
     * The only supported options are {@link StandardOpenOption#CREATE}, {@link StandardOpenOption#CREATE_NEW},
     * {@link StandardOpenOption#WRITE}, and {@link StandardOpenOption#TRUNCATE_EXISTING}. Any other option will
     * result in an {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be
     * specified or an {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only
     * overwritten completely.
     * <p>
     * This stream will not attempt to buffer the entire file, however some buffering will be done for potential
     * optimizations and to avoid network thrashing. Specifically, up to the put-blob threshold configured on the
     * file system may be buffered before anything is sent; once that is exceeded, the data will be broken into
     * chunks and sent in blocks, and writes will be buffered into sizes equal to the configured block size. The
     * maximum number of buffers of this size to be allocated, and hence the level of write parallelism, is also a
     * file system configuration and may affect write speeds.
     * <p>
     * The data is only committed when the steam is closed. Hence data cannot be read from the destination until
     * the stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is
     * finalized and available for reading.
     * <p>
     * Writing happens asynchronously. Bytes passed for writing are stored until either the threshold or block size
     * are met at which time they are sent to the service. When the write method returns, there is no guarantee
     * about which phase of this process the data is in other than it has been accepted and will be written. Again,
     * closing will guarantee that the data is written and available.
     * <p>
     * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
     * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous
     * write may not otherwise be thrown unless the stream is flushed, closed, or written to again.
     *
     * @param path the path to the file to open or create
     * @param options options specifying how the file is opened
     * @return a new output stream
     * @throws IllegalArgumentException if an invalid combination of options is specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
        // Default to creating a new file or overwriting an existing one when no options are given.
        if (options == null || options.length == 0) {
            options = new OpenOption[] {
                StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING };
        }
        List<OpenOption> optionsList = Arrays.asList(options);

        List<OpenOption> supportedOptions = Arrays.asList(
            StandardOpenOption.CREATE_NEW,
            StandardOpenOption.CREATE,
            StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING);
        for (OpenOption option : optionsList) {
            if (!supportedOptions.contains(option)) {
                throw LoggingUtility.logError(logger,
                    new UnsupportedOperationException("Unsupported option: " + option.toString()));
            }
        }

        // Files can only be overwritten completely, never updated in place.
        if (!optionsList.contains(StandardOpenOption.WRITE)
            || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException(
                "Write and TruncateExisting must be specified to open an OutputStream"));
        }

        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());
        DirectoryStatus status = resource.checkDirStatus();
        if (DirectoryStatus.isDirectory(status)) {
            throw LoggingUtility.logError(logger,
                new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.DOES_NOT_EXIST)
            && !(optionsList.contains(StandardOpenOption.CREATE)
                || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
                + "option. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.NOT_A_DIRECTORY)
            && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
                + "CREATE_NEW was specified. Path: " + path.toString()));
        }

        // Pull transfer tuning from the file system configuration.
        AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
        Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
        Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
        ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(),
            null, putBlobThreshold);

        // CREATE_NEW must not overwrite, so add an etag condition in case something is created concurrently.
        BlobRequestConditions rq = null;
        if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            rq = new BlobRequestConditions().setIfNoneMatch("*");
        }

        return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto,
            null, null, null, rq), resource.getPath());
    }

    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned
     * by the directory stream's iterator are of type Path, each one representing an entry in the directory. The
     * Path objects are obtained as if by resolving the name of the directory entry against dir. The entries
     * returned by the iterator are filtered by the given filter.
     * <p>
     * When not using the try-with-resources construct, then directory stream's close method should be invoked after
     * iteration is completed so as to free any resources held for the open directory.
     * <p>
     * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the
     * hasNext or next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
     * DirectoryIteratorException with the IOException as the cause.
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);

        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
        caught in instantiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside the
        stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }

        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i>
     * is defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is
     * also known as a <i>virtual directory</i> and enables the file system to work with containers that were
     * pre-loaded with data by another source but need to be accessed by this file system. <i>Strong existence</i>
     * is defined as the presence of an actual storage resource at the given path, which in the case of directories,
     * is a zero-length blob whose name is the directory path with a particular metadata field indicating the blob's
     * status as a directory. This is also known as a <i>concrete directory</i>. Directories created by this file
     * system will strongly exist. Operations targeting directories themselves as the object (e.g. setting
     * properties) will target marker blobs underlying concrete directories. Other operations (e.g. listing) will
     * operate on the blob-name prefix.
     * <p>
     * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
     * directory if it does not exist are a single operation that is atomic with respect to all other filesystem
     * activities that might affect the directory." More specifically, this method will atomically check for
     * <i>strong existence</i> of another file or directory at the given path and fail if one is present. On the
     * other hand, we only check for <i>weak existence</i> of the parent to determine if the given path is valid.
     * Additionally, the action of checking whether the parent exists, is <i>not</i> atomic with the creation of the
     * directory. Note that while it is possible that the parent may be deleted between when the parent is
     * determined to exist and the creation of the child, the creation of the child will always ensure the existence
     * of a virtual parent, so the child will never be left floating and unreachable. The different checks on parent
     * and child is due to limitations in the Storage service API.
     * <p>
     * There may be some unintuitive behavior when working with directories in this file system, particularly
     * virtual directories (usually those not created by this file system). A virtual directory will disappear as
     * soon as all its children have been deleted. Furthermore, if a directory with the given path weakly exists at
     * the time of calling this method, this method will still return success and create a concrete directory at the
     * target location. In other words, it is possible to "double create" a directory if it first weakly exists and
     * then is strongly created. This is both because it is impossible to atomically check if a virtual directory
     * exists while creating a concrete directory and because such behavior will have minimal side effects--no files
     * will be overwritten and the directory will still be available for writing as intended, though it may not be
     * empty. This is not a complete list of such unintuitive behavior.
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
I'm not sure if it's generalized yet haha. Still could change if you think otherwise. But we'd have to add that explicit check in a lot of different places, which I'm also not a fan of because I think we're likely to forget one.
/**
 * Checks the existence, and optionally the accessibility, of a file.
 * <p>
 * This method may only be used to check the existence of a file. It is not possible to determine the
 * permissions granted to a given client, so if any mode argument is specified, an
 * {@link AccessDeniedException} is thrown.
 *
 * @param path the path to the file to check
 * @param accessModes The access modes to check; may have zero elements
 * @throws AccessDeniedException if any access mode is requested, as permissions cannot be determined
 * @throws NoSuchFileException if a file does not exist
 * @throws IOException If an I/O error occurs.
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    // Permissions cannot be queried from the service, so any explicit mode request must fail.
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
    Some static utility methods in the jdk check access on a root. readAttributes is not supported on roots
    as they are containers, and roots are verified at file system creation and cannot be deleted by this
    file system, so they are assumed to exist and we short circuit here.
    */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }

    try {
        // Existence check: reading basic attributes fails when the resource is absent.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        // instanceof is null-safe, so no separate null check on the cause is needed.
        Throwable cause = e.getCause();
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            // Log before rethrowing, for consistency with the rest of this provider's error handling.
            throw LoggingUtility.logError(logger, e);
        }
    }
}
} catch(IOException e) {
/**
 * Checks for the existence of the file at the given path.
 * <p>
 * Permission checks are not supported by this provider; supplying any access mode results in an
 * {@link AccessDeniedException}.
 *
 * @param path the path to the file to check
 * @param accessModes The access modes to check; may have zero elements
 * @throws AccessDeniedException if any access mode is requested
 * @throws NoSuchFileException if a file does not exist
 * @throws IOException If an I/O error occurs.
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    boolean modesRequested = accessModes != null && accessModes.length != 0;
    if (modesRequested) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);

    /*
    Roots get a short circuit: some jdk utilities check access on a root, readAttributes cannot be used on
    roots as they are containers, and roots are validated at creation and can never be deleted by the file
    system, so they are assumed to exist.
    */
    boolean isRoot = path instanceof AzurePath && ((AzurePath) path).isRoot();
    if (isRoot) {
        return;
    }

    try {
        // Existence is probed by attempting to read the basic attributes.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException ioEx) {
        Throwable underlying = ioEx.getCause();
        boolean blobMissing = underlying instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) underlying).getErrorCode());
        if (blobMissing) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, ioEx);
    }
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public OutputStream newOutputStream(Path path, OpenOption... 
options) throws IOException { if (options == null || options.length == 0) { options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING }; } List<OpenOption> optionsList = Arrays.asList(options); List<OpenOption> supportedOptions = Arrays.asList( StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); for (OpenOption option : optionsList) { if (!supportedOptions.contains(option)) { throw new UnsupportedOperationException("Unsupported option: " + option.toString()); } } if (!optionsList.contains(StandardOpenOption.WRITE) || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) { throw logger.logThrowableAsError( new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); DirectoryStatus status = resource.checkDirStatus(); if (DirectoryStatus.isDirectory(status)) { throw LoggingUtility.logError(logger, new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString())); } if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE) || optionsList.contains(StandardOpenOption.CREATE_NEW))) { throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create " + "option. Path: " + path.toString())); } if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) { throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and " + "CREATE_NEW was specified. Path: " + path.toString())); } AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem()); Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue(); Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? 
null : fs.getPutBlobThreshold().intValue(); ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null, putBlobThreshold); BlobRequestConditions rq = null; if (optionsList.contains(StandardOpenOption.CREATE_NEW)) { rq = new BlobRequestConditions().setIfNoneMatch("*"); } return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null, null, rq), resource.getPath()); } /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. * * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? 
super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. 
* <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. 
* <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? 
new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw new UnsupportedOperationException(); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw new UnsupportedOperationException(); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw new UnsupportedOperationException(); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw new UnsupportedOperationException(); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
     * @throws SecurityException never
     */
    @Override
    public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions)
        throws IOException {
        AzurePath.ensureFileSystemOpen(path);
        String viewType;
        String attributeName;
        // Attribute strings take the form "[view:]attribute"; at most one ':' qualifier is allowed.
        String[] parts = attributes.split(":");
        if (parts.length > 2) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Invalid format for attribute string: " + attributes));
        }
        if (parts.length == 1) {
            // No qualifier: the nio contract says an unqualified attribute implies the "basic" view.
            viewType = "basic";
            attributeName = attributes;
        } else {
            viewType = parts[0];
            attributeName = parts[1];
        }

        /*
        For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs
        state that "basic" must be supported, so we funnel to azureBasic.
         */
        if (viewType.equals("basic")) {
            viewType = AzureBasicFileAttributeView.NAME;
        }

        // The basic view exposes no settable attributes in this provider, so any set through it is rejected.
        if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType
                + ". Attribute: " + attributeName));
        } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) {
            // Look up the setter (consumer) registered for the requested attribute on the blob view.
            Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers(
                this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions));
            if (!attributeConsumers.containsKey(attributeName)) {
                throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                    + viewType + ". Attribute: " + attributeName));
            }
            try {
                attributeConsumers.get(attributeName).accept(value);
            } catch (UncheckedIOException e) {
                // The consumers wrap IOExceptions; unwrap and rethrow the original cause when it is the marker error.
                if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) {
                    throw LoggingUtility.logError(logger, e.getCause());
                }
            }
        } else {
            throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: "
                + viewType));
        }
    }

    // Removes the given file system from the cache of open file systems; invoked when a file system is closed.
    void closeFileSystem(String fileSystemName) {
        this.openFileSystems.remove(fileSystemName);
    }

    // Extracts and validates the account name from the "account=" query parameter of an azb: URI.
    private String extractAccountName(URI uri) {
        if (!uri.getScheme().equals(this.getScheme())) {
            throw LoggingUtility.logError(this.logger, new IllegalArgumentException(
                "URI scheme does not match this provider"));
        }
        if (CoreUtils.isNullOrEmpty(uri.getQuery())) {
            // NOTE(review): the message literal below is truncated in this copy of the source ("azb:" run cut off by
            // extraction) — confirm its exact text against the original file.
            throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: }

        // Scan the query parameters for "account=<name>"; error if absent.
        // NOTE(review): the error-message literal inside this chain is likewise truncated in this copy of the source.
        String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast();

        if (CoreUtils.isNullOrEmpty(accountName)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI"
                + " query."));
        }

        return accountName;
    }
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
     * Bytes passed for writing are stored until either the threshold or block size are met at which time they are
     * sent to the service. When the write method returns, there is no guarantee about which phase of this process the
     * data is in other than it has been accepted and will be written. Again, closing will guarantee that the data is
     * written and available.
     * <p>
     * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
     * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write
     * may not otherwise be thrown unless the stream is flushed, closed, or written to again.
     *
     * @param path the path to the file to open or create
     * @param options options specifying how the file is opened
     * @return a new output stream
     * @throws IllegalArgumentException if an invalid combination of options is specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
        // Default to overwrite-create semantics when no options are given.
        if (options == null || options.length == 0) {
            options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING };
        }
        List<OpenOption> optionsList = Arrays.asList(options);
        List<OpenOption> supportedOptions = Arrays.asList(
            StandardOpenOption.CREATE_NEW,
            StandardOpenOption.CREATE,
            StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING);
        // Reject any option outside the supported set.
        for (OpenOption option : optionsList) {
            if (!supportedOptions.contains(option)) {
                throw LoggingUtility.logError(logger,
                    new UnsupportedOperationException("Unsupported option: " + option.toString()));
            }
        }
        // Files can only be overwritten completely, never updated in place, so both options are mandatory.
        if (!optionsList.contains(StandardOpenOption.WRITE)
            || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
        }

        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());
        DirectoryStatus status = resource.checkDirStatus();
        if (DirectoryStatus.isDirectory(status)) {
            throw LoggingUtility.logError(logger,
                new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
            || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
                + "option. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
                + "CREATE_NEW was specified. Path: " + path.toString()));
        }

        // Carry the file system's transfer tuning (block size, put-blob threshold, concurrency) into the stream.
        AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
        Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
        Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
        ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
            putBlobThreshold);

        // CREATE_NEW is enforced atomically at the service with an If-None-Match: * condition.
        BlobRequestConditions rq = null;
        if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            rq = new BlobRequestConditions().setIfNoneMatch("*");
        }

        return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null,
            null, null, rq), resource.getPath());
    }

    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned
     * by the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path
     * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by
     * the iterator are filtered by the given filter.
     * <p>
     * When not using the try-with-resources construct, then directory stream's close method should be invoked after
     * iteration is completed so as to free any resources held for the open directory.
     * <p>
     * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext
     * or next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
     * DirectoryIteratorException with the IOException as the cause.
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);
        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
        caught in instantiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside the
        stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }
        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
     * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
     * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
     * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
     * the presence of an actual storage resource at the given path, which in the case of directories, is a
     * zero-length blob whose name is the directory path with a particular metadata field indicating the blob's
     * status as a directory. This is also known as a <i>concrete directory</i>. Directories created by this file
     * system will strongly exist. Operations targeting directories themselves as the object (e.g. setting
     * properties) will target marker blobs underlying concrete directories. Other operations (e.g. listing) will
     * operate on the blob-name prefix.
     * <p>
     * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
     * directory if it does not exist are a single operation that is atomic with respect to all other filesystem
     * activities that might affect the directory." More specifically, this method will atomically check for
     * <i>strong existence</i> of another file or directory at the given path and fail if one is present. On the
     * other hand, we only check for <i>weak existence</i> of the parent to determine if the given path is valid.
     * Additionally, the action of checking whether the parent exists, is <i>not</i> atomic with the creation of the
     * directory. Note that while it is possible that the parent may be deleted between when the parent is determined
     * to exist and the creation of the child, the creation of the child will always ensure the existence of a
     * virtual parent, so the child will never be left floating and unreachable. The different checks on parent and
     * child is due to limitations in the Storage service API.
     * <p>
     * There may be some unintuitive behavior when working with directories in this file system, particularly virtual
     * directories (usually those not created by this file system). A virtual directory will disappear as soon as all
     * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of
     * calling this method, this method will still return success and create a concrete directory at the target
     * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is
     * strongly created. This is both because it is impossible to atomically check if a virtual directory exists
     * while creating a concrete directory and because such behavior will have minimal side effects--no files will be
     * overwritten and the directory will still be available for writing as intended, though it may not be empty.
     * This is not a complete list of such unintuitive behavior.
     * <p>
     * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
     * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
     * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
     * When extracting the content headers, the following strings will be used for comparison (constants for these
     * values can be found on this type):
     * <ul>
     * <li>{@code Content-Type}</li>
     * <li>{@code Content-Disposition}</li>
     * <li>{@code Content-Language}</li>
     * <li>{@code Content-Encoding}</li>
     * <li>{@code Content-MD5}</li>
     * <li>{@code Cache-Control}</li>
     * </ul>
     * Note that these properties also have a particular semantic in that if one is specified, all are updated. In
     * other words, if any of the above is set, all those that are not set will be cleared. See the Azure Docs on
     * Set Blob Properties for more information.
     *
     * @param path the directory to create
     * @param fileAttributes an optional list of file attributes to set atomically when creating the directory
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when
     * creating the directory
     * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name
     * already exists
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
        // Normalize a null attribute array to empty so it can be passed straight through.
        fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

        AzureResource azureResource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(azureResource.getPath());
        // Only a weak (virtual) existence check on the parent; see the javadoc above for the atomicity trade-offs.
        if (azureResource.checkParentDirectoryExists()) {
            try {
                // If-None-Match: * makes the existence check and the creation a single atomic service operation.
                azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                    .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
            } catch (BlobStorageException e) {
                if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                    && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                    throw LoggingUtility.logError(logger,
                        new FileAlreadyExistsException(azureResource.getPath().toString()));
                } else {
                    throw LoggingUtility.logError(logger,
                        new IOException("An error occurred when creating the directory", e));
                }
            }
        } else {
            throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
                + azureResource.getPath().toString()));
        }
    }

    /**
     * Deletes the specified resource.
     * <p>
     * This method is not atomic with respect to other file system operations. It is possible to delete a file in use
     * by another process, and doing so will not immediately invalidate any channels open to that file--they will
     * simply start to fail. Root directories cannot be deleted even when empty.
     *
     * @param path the path to the file to delete
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NoSuchFileException if the file does not exist
     * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the
     * directory is not empty
     * @throws IOException If an I/O error occurs.
* @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. 
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
We should update other locations to use `LoggingUtility` when throwing exceptions.
/**
 * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob.
 * <p>
 * Only CREATE, CREATE_NEW, WRITE, and TRUNCATE_EXISTING are supported. WRITE and TRUNCATE_EXISTING must both be
 * present, so files can only be overwritten completely, never updated in place.
 *
 * @param path the path to the file to open or create
 * @param options options specifying how the file is opened; defaults to CREATE, WRITE, TRUNCATE_EXISTING when
 * null or empty
 * @return a new output stream
 * @throws IllegalArgumentException if an invalid combination of options is specified
 * @throws UnsupportedOperationException if an unsupported option is specified
 * @throws IOException If an I/O error occurs.
 */
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    // Default to "create if absent and overwrite" when no options are given.
    if (options == null || options.length == 0) {
        options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING };
    }
    List<OpenOption> optionsList = Arrays.asList(options);
    List<OpenOption> supportedOptions = Arrays.asList(
        StandardOpenOption.CREATE_NEW,
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING);
    for (OpenOption option : optionsList) {
        if (!supportedOptions.contains(option)) {
            // Fix: route through LoggingUtility.logError so this failure is logged like every other throw in
            // this provider; previously this exception was thrown without being logged.
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Unsupported option: " + option.toString()));
        }
    }
    // Updates are not supported; callers must always overwrite the destination completely.
    if (!optionsList.contains(StandardOpenOption.WRITE)
        || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
    }

    AzureResource resource = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(resource.getPath());
    DirectoryStatus status = resource.checkDirStatus();

    // Cannot write file content over a (concrete or virtual) directory.
    if (DirectoryStatus.isDirectory(status)) {
        throw LoggingUtility.logError(logger,
            new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
    }
    // Creating a brand new file requires an explicit create option.
    if (status.equals(DirectoryStatus.DOES_NOT_EXIST)
        && !(optionsList.contains(StandardOpenOption.CREATE)
            || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    // CREATE_NEW demands that nothing already exist at the destination.
    if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }

    AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
    Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
        putBlobThreshold);

    // If CREATE_NEW was specified, guard against a concurrent create between our existence check above and the
    // actual upload by failing the write if anything now matches at the destination.
    BlobRequestConditions rq = null;
    if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        rq = new BlobRequestConditions().setIfNoneMatch("*");
    }

    return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null,
        null, null, rq), resource.getPath());
}
throw LoggingUtility.logError(logger,
/**
 * Opens an {@link OutputStream} to the given path; the data is committed as a block blob when the stream is
 * closed.
 * <p>
 * Only CREATE, CREATE_NEW, WRITE, and TRUNCATE_EXISTING are accepted, and both WRITE and TRUNCATE_EXISTING are
 * required, so a file can only ever be replaced wholesale, never patched in place.
 *
 * @param path the path to the file to open or create
 * @param options options specifying how the file is opened; null or empty defaults to CREATE, WRITE,
 * TRUNCATE_EXISTING
 * @return a new output stream
 * @throws IllegalArgumentException if an invalid combination of options is specified
 * @throws UnsupportedOperationException if an unsupported option is specified
 * @throws IOException If an I/O error occurs.
 */
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    // No options means "create if necessary and overwrite".
    OpenOption[] effective = (options == null || options.length == 0)
        ? new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
            StandardOpenOption.TRUNCATE_EXISTING }
        : options;

    List<OpenOption> requested = Arrays.asList(effective);
    List<OpenOption> allowed = Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE,
        StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
    for (OpenOption opt : requested) {
        if (!allowed.contains(opt)) {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Unsupported option: " + opt.toString()));
        }
    }
    // Both WRITE and TRUNCATE_EXISTING are mandatory: in-place updates are not supported.
    if (!(requested.contains(StandardOpenOption.WRITE)
        && requested.contains(StandardOpenOption.TRUNCATE_EXISTING))) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
    }

    AzureResource target = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(target.getPath());
    DirectoryStatus dirStatus = target.checkDirStatus();

    // A directory (concrete or virtual) can never be a write destination.
    if (DirectoryStatus.isDirectory(dirStatus)) {
        throw LoggingUtility.logError(logger,
            new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
    }
    boolean creating = requested.contains(StandardOpenOption.CREATE)
        || requested.contains(StandardOpenOption.CREATE_NEW);
    if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !creating) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    boolean exclusive = requested.contains(StandardOpenOption.CREATE_NEW);
    if (dirStatus.equals(DirectoryStatus.NOT_A_DIRECTORY) && exclusive) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }

    AzureFileSystem fs = (AzureFileSystem) path.getFileSystem();
    Integer chunkSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer uploadThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions transferOptions = new ParallelTransferOptions(chunkSize,
        fs.getMaxConcurrencyPerRequest(), null, uploadThreshold);

    // CREATE_NEW: make the upload itself fail if something appeared at the destination after our check above.
    BlobRequestConditions conditions = exclusive ? new BlobRequestConditions().setIfNoneMatch("*") : null;

    return new NioBlobOutputStream(
        target.getBlobClient().getBlockBlobClient().getBlobOutputStream(transferOptions, null, null, null,
            conditions),
        target.getPath());
}
class AzureFileSystemProvider extends FileSystemProvider { private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class); /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_TYPE = "Content-Type"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_DISPOSITION = "Content-Disposition"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_LANGUAGE = "Content-Language"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_ENCODING = "Content-Encoding"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CONTENT_MD5 = "Content-MD5"; /** * A helper for setting the HTTP properties when creating a directory. */ public static final String CACHE_CONTROL = "Cache-Control"; private static final String ACCOUNT_QUERY_KEY = "account"; private static final int COPY_TIMEOUT_SECONDS = 30; private final ConcurrentMap<String, FileSystem> openFileSystems; /** * Creates an AzureFileSystemProvider. */ public AzureFileSystemProvider() { this.openFileSystems = new ConcurrentHashMap<>(); } /** * Returns the URI scheme that identifies this provider: {@code "azb".} * * @return {@code "azb"} */ @Override public String getScheme() { return "azb"; } /** * Constructs a new FileSystem object identified by a URI. * <p> * The format of a {@code URI} identifying a file system is {@code "azb: * <p> * Once closed, a file system with the same identifier may be reopened. * * @param uri URI reference * @param config A map of provider specific properties to configure the file system * @return a new file system. 
* @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter * does not contain properties required by the provider, or a property value is invalid. * @throws IOException If an I/O error occurs. * @throws SecurityException never * @throws FileSystemAlreadyExistsException If the file system has already been created. */ @Override public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException { String accountName = extractAccountName(uri); if (this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName)); } AzureFileSystem afs = new AzureFileSystem(this, accountName, config); this.openFileSystems.put(accountName, afs); return afs; } /** * Returns an existing FileSystem created by this provider. * <p> * The format of a {@code URI} identifying an file system is {@code "azb: * <p> * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a * file system with the same identifier may be reopened. * * @param uri URI reference * @return the file system * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met * @throws FileSystemNotFoundException If the file system already exists * @throws SecurityException never */ @Override public FileSystem getFileSystem(URI uri) { String accountName = extractAccountName(uri); if (!this.openFileSystems.containsKey(accountName)) { throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName)); } return this.openFileSystems.get(accountName); } /** * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already * exists. * * @param uri The URI to convert * @return The path identified by the URI. 
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw new UnsupportedOperationException(); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. 
* * @param path the path to the directory * @param filter the directory stream filter * @return a new and open {@code DirectoryStream} object * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter) throws IOException { if (!(path instanceof AzurePath)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on " + "subtypes of Path other than AzurePath")); } AzurePath.ensureFileSystemOpen(path); /* Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be caught in instatiating the stream below. Possible optimization later is to save the result of the list call to use as the first list call inside the stream rather than a list call for checking the status and a list call for listing. */ if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) { throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString())); } return new AzureDirectoryStream((AzurePath) path, filter); } /** * Creates a new directory at the specified path. * <p> * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded * with data by another source but need to be accessed by this file system. 
<i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. * <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. 
Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. * <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
     * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when
     * creating the directory
     * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name
     * already exists
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
        // Normalize a null varargs array to empty so the attribute list below is always valid.
        fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

        // Wrapping in AzureResource validates the path type (must be an AzurePath); then confirm
        // the owning file system has not been closed.
        AzureResource azureResource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(azureResource.getPath());

        // Only *weak* (virtual) existence is required of the parent; see the method-level javadoc
        // for why the parent and child checks differ.
        if (azureResource.checkParentDirectoryExists()) {
            try {
                // If-None-Match: "*" makes the put conditional on no concrete blob already occupying
                // this path, which provides the atomic strong-existence check the nio contract needs.
                azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                    .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
            } catch (BlobStorageException e) {
                // 409/BlobAlreadyExists maps to the nio-mandated FileAlreadyExistsException; any
                // other service failure is surfaced as a generic IOException with the cause attached.
                if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                    && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                    throw LoggingUtility.logError(logger,
                        new FileAlreadyExistsException(azureResource.getPath().toString()));
                } else {
                    throw LoggingUtility.logError(logger,
                        new IOException("An error occurred when creating the directory", e));
                }
            }
        } else {
            throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
                + azureResource.getPath().toString()));
        }
    }

    /**
     * Deletes the specified resource.
     * <p>
     * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by
     * another process, and doing so will not immediately invalidate any channels open to that file--they will simply
     * start to fail. Root directories cannot be deleted even when empty.
     *
     * @param path the path to the file to delete
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NoSuchFileException if the file does not exist
     * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the
     * directory is not empty
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public void delete(Path path) throws IOException {
        // Wrapping in AzureResource validates the path type (must be an AzurePath); then confirm
        // the owning file system has not been closed.
        AzureResource azureResource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(azureResource.getPath());

        // Classify the target before deleting: missing, non-empty directory, or deletable resource.
        DirectoryStatus dirStatus = azureResource.checkDirStatus();
        if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
            throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString()));
        }
        try {
            azureResource.getBlobClient().delete();
        } catch (BlobStorageException e) {
            // Not atomic: the blob may have been deleted by another actor between the status check
            // above and this call, so BLOB_NOT_FOUND is still translated to NoSuchFileException.
            if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
                throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
            }
            throw LoggingUtility.logError(logger, new IOException(e));
        }
    }

    /**
     * Copies the resource at the source location to the destination.
     * <p>
     * This method is not atomic with respect to other file system operations. More specifically, the checks necessary
     * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy
     * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination.
     * <p>
     * In addition to those in the docs for {@link FileSystemProvider#copy(Path, Path, CopyOption...)}, this method has
     * the following requirements for successful completion. {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed
     * as it is impossible not to copy blob properties; if this option is not passed, an
     * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root
     * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown.
The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. * <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
     *
     * @param path path
     * @param path1 path
     * @param copyOptions options
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @param path1 path
     * @throws UnsupportedOperationException Operation is not supported.
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Always returns false as hidden files are not supported.
     *
     * @param path the path
     * @return false
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        return false;
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @return the file store where the file is stored.
     * @throws UnsupportedOperationException Operation is not supported.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Checks the existence, and optionally the accessibility, of a file.
     * <p>
     * This method may only be used to check the existence of a file. It is not possible to determine the permissions
     * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be
     * thrown.
     *
     * @param path the path to the file to check
     * @param accessModes The access modes to check; may have zero elements
     * @throws NoSuchFileException if a file does not exist
     * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be
     * determined because the Java virtual machine has insufficient privileges or other reasons
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
        // Permissions cannot be inspected for a storage client, so only pure existence checks
        // (no access modes) are supported.
        if (accessModes != null && accessModes.length != 0) {
            throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
        }
        AzurePath.ensureFileSystemOpen(path);
        // Existence is probed by reading basic attributes; a BLOB_NOT_FOUND cause is translated to
        // the nio-mandated NoSuchFileException, everything else propagates unchanged.
        try {
            readAttributes(path, BasicFileAttributes.class);
        } catch (IOException e) {
            Throwable cause = e.getCause();
            if (cause instanceof BlobStorageException
                && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
                throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
            } else {
                throw e;
            }
        }
    }

    /**
     * Returns a file attribute view of a given type.
     * <p>
     * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information.
     * <p>
     * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@code createDirectory} for more information on virtual directories.
     *
     * @param path the path to the file
     * @param type the Class object corresponding to the file attribute view
     * @param linkOptions ignored
     * @return a file attribute view of the specified type, or null if the attribute view type is not available
     */
    @Override
    @SuppressWarnings("unchecked")
    public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) {
        /*
        No resource validation is necessary here. That can happen at the time of making a network request internal to
        the view object.
         */
        if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) {
            return (V) new AzureBasicFileAttributeView(path);
        } else if (type == AzureBlobFileAttributeView.class) {
            return (V) new AzureBlobFileAttributeView(path);
        } else {
            return null;
        }
    }

    /**
     * Reads a file's attributes as a bulk operation.
     * <p>
     * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
     * <p>
     * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}.
     * See
     * {@code createDirectory} for more information on virtual directories.
     *
     * @param path the path to the file
     * @param type the Class of the file attributes required to read
     * @param linkOptions ignored
     * @return the file attributes
     * @throws UnsupportedOperationException if an attributes of the given type are not supported
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    @SuppressWarnings("unchecked")
    public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
        throws IOException {
        AzurePath.ensureFileSystemOpen(path);

        // Map the requested attributes type to the view that produces it. The plain
        // BasicFileAttributes request is served by our azureBasic implementation.
        Class<? extends BasicFileAttributeView> view;
        if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
            view = AzureBasicFileAttributeView.class;
        } else if (type == AzureBlobFileAttributes.class) {
            view = AzureBlobFileAttributeView.class;
        } else {
            throw new UnsupportedOperationException();
        }

        /*
        Resource validation will happen in readAttributes of the view. We don't want to double check, and checking
        internal to the view ensures it is always checked no matter which code path is taken.
         */
        return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
    }

    /**
     * Reads a set of file attributes as a bulk operation.
     * <p>
     * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information.
     * <p>
     * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See
     * {@code createDirectory} for more information on virtual directories.
     *
     * @param path the path to the file
     * @param attributes the attributes to read
     * @param linkOptions ignored
     * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are
     * the attribute values
     * @throws UnsupportedOperationException if an attributes of the given type are not supported
     * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified
     * @throws IOException If an I/O error occurs.
* @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. 
*/ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. 
See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Name of the URI query parameter that identifies the storage account, e.g. "azb://?account=<name>".
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a service-side blob copy before giving up; see copy().
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Registry of file systems currently open through this provider, keyed by account name.
    // Concurrent because newFileSystem/getFileSystem/closeFileSystem may race.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb".}
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter
     * does not contain properties required by the provider, or a property value is invalid.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException If the file system has already been created.
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);

        // NOTE(review): containsKey followed by put is not atomic; two concurrent calls for the
        // same account could both succeed, the second silently replacing the first — confirm
        // whether single-threaded creation is an accepted assumption here.
        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, afs);

        return afs;
    }

    /**
     * Returns an existing FileSystem created by this provider.
     * <p>
     * The format of a {@code URI} identifying an file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @return the file system
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
     * @throws FileSystemNotFoundException If no file system with the given identifier is open
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        if (!this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return this.openFileSystems.get(accountName);
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that already
     * exists.
     *
     * @param uri The URI to convert
     * @return The path identified by the URI.
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are * met at which time they are sent to the service. When the write method returns, there is no guarantee about which * phase of this process the data is in other than it has been accepted and will be written. Again, closing will * guarantee that the data is written and available. * <p> * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors. * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write * may not otherwise be thrown unless the stream is flushed, closed, or written to again. * * @param path the path to the file to open or create * @param options options specifying how the file is opened * @return a new output stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override /** * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the * iterator are filtered by the given filter. * <p> * When not using the try-with-resources construct, then directory stream's close method should be invoked after * iteration is completed so as to free any resources held for the open directory. * <p> * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a * DirectoryIteratorException with the IOException as the cause. 
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
     * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        // Only AzurePaths can be listed by this provider.
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);

        /*
        Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
        caught in instantiating the stream below.

        Possible optimization later is to save the result of the list call to use as the first list call inside the
        stream rather than a list call for checking the status and a list call for listing.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }

        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
     * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
     * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
     * with data by another source but need to be accessed by this file system.
<i>Strong existence</i> is defined as * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length * blob whose name is the directory path with a particular metadata field indicating the blob's status as a * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name * prefix. * <p> * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the * directory if it does not exist are a single operation that is atomic with respect to all other filesystem * activities that might affect the directory." More specifically, this method will atomically check for <i>strong * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that * while it is possible that the parent may be deleted between when the parent is determined to exist and the * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the * child will never be left floating and unreachable. The different checks on parent and child is due to limitations * in the Storage service API. * <p> * There may be some unintuitive behavior when working with directories in this file system, particularly virtual * directories (usually those not created by this file system). A virtual directory will disappear as soon as all * its children have been deleted. 
Furthermore, if a directory with the given path weakly exists at the time of * calling this method, this method will still return success and create a concrete directory at the target * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while * creating a concrete directory and because such behavior will have minimal side effects--no files will be * overwritten and the directory will still be available for writing as intended, though it may not be empty. This * is not a complete list of such unintuitive behavior. * <p> * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}. * When extracting the content headers, the following strings will be used for comparison (constants for these * values can be found on this type): * <ul> * <li>{@code Content-Type}</li> * <li>{@code Content-Disposition}</li> * <li>{@code Content-Language}</li> * <li>{@code Content-Encoding}</li> * <li>{@code Content-MD5}</li> * <li>{@code Cache-Control}</li> * </ul> * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other * words, if any of the above is set, all those that are not set will be cleared. See the * <a href="https: * information. * * @param path the directory to create * @param fileAttributes an optional list of file attributes to set atomically when creating the directory * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
* @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when * creating the directory * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name * already exists * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException { fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes; AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); if (azureResource.checkParentDirectoryExists()) { try { azureResource.setFileAttributes(Arrays.asList(fileAttributes)) .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*")); } catch (BlobStorageException e) { if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(azureResource.getPath().toString())); } else { throw LoggingUtility.logError(logger, new IOException("An error occurred when creating the directory", e)); } } } else { throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: " + azureResource.getPath().toString())); } } /** * Deletes the specified resource. * <p> * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by * another process, and doing so will not immediately invalidate any channels open to that file--they will simply * start to fail. Root directories cannot be deleted even when empty. * * @param path the path to the file to delete * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. 
* @throws NoSuchFileException if the file does not exist * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the * directory is not empty * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void delete(Path path) throws IOException { AzureResource azureResource = new AzureResource(path); AzurePath.ensureFileSystemOpen(azureResource.getPath()); DirectoryStatus dirStatus = azureResource.checkDirStatus(); if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString())); } try { azureResource.getBlobClient().delete(); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } throw LoggingUtility.logError(logger, new IOException(e)); } } /** * Copies the resource at the source location to the destination. * <p> * This method is not atomic with respect to other file system operations. More specifically, the checks necessary * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination. * <p> * In addition to those in the docs for {@link FileSystemProvider * the following requirements for successful completion. {@link StandardCopyOption * as it is impossible not to copy blob properties; if this option is not passed, an * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. 
The parent * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown. * The only supported option other than {@link StandardCopyOption * {@link StandardCopyOption * {@link UnsupportedOperationException}. * <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void checkAccess(Path path, AccessMode... accessModes) throws IOException { if (accessModes != null && accessModes.length != 0) { throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined.")); } AzurePath.ensureFileSystemOpen(path); /* Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation and cannot be deleted by the file system. Thus, we prefer a short circuit for roots. */ if (path instanceof AzurePath && ((AzurePath) path).isRoot()) { return; } try { readAttributes(path, BasicFileAttributes.class); } catch (IOException e) { Throwable cause = e.getCause(); if (cause instanceof BlobStorageException && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) { throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString())); } else { throw LoggingUtility.logError(logger, e); } } } /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. 
*/ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. 
See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. 
*/ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. 
* <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
We should log this exception before rethrowing it
/**
 * Checks the existence, and optionally the accessibility, of a file.
 * <p>
 * Only existence can be checked: the permissions granted to a client cannot be determined, so any
 * non-empty {@code accessModes} argument is rejected up front.
 *
 * @param path the path to the file to check
 * @param accessModes the access modes to check; must have zero elements
 * @throws NoSuchFileException if the file does not exist
 * @throws AccessDeniedException if any access mode is specified, as access cannot be determined
 * @throws IOException if an I/O error occurs
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);
    try {
        // Existence is probed by attempting to read basic attributes.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable cause = e.getCause();
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            // A 404 from the service means the resource simply does not exist.
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            // Log before rethrowing so unexpected failures surface in client logs,
            // consistent with every other throw site in this provider.
            throw LoggingUtility.logError(logger, e);
        }
    }
}
throw e;
/**
 * Checks the existence, and optionally the accessibility, of a file.
 * <p>
 * Only existence can be checked: the permissions granted to a client cannot be determined, so any
 * non-empty {@code accessModes} argument is rejected up front.
 *
 * @param path the path to the file to check
 * @param accessModes the access modes to check; must have zero elements
 * @throws NoSuchFileException if the file does not exist
 * @throws AccessDeniedException if any access mode is specified, as access cannot be determined
 * @throws IOException if an I/O error occurs
 */
public void checkAccess(Path path, AccessMode... accessModes) throws IOException {
    if (accessModes != null && accessModes.length != 0) {
        throw LoggingUtility.logError(logger, new AccessDeniedException("The access cannot be determined."));
    }
    AzurePath.ensureFileSystemOpen(path);
    /*
    Some static utility methods in the jdk require checking access on a root. ReadAttributes is not supported on
    roots as they are containers. Furthermore, we always assume that roots exist as they are verified at creation
    and cannot be deleted by the file system. Thus, we prefer a short circuit for roots.
    */
    if (path instanceof AzurePath && ((AzurePath) path).isRoot()) {
        return;
    }
    try {
        // Existence is probed by attempting to read basic attributes.
        readAttributes(path, BasicFileAttributes.class);
    } catch (IOException e) {
        Throwable cause = e.getCause();
        if (cause instanceof BlobStorageException
            && BlobErrorCode.BLOB_NOT_FOUND.equals(((BlobStorageException) cause).getErrorCode())) {
            // A 404 from the service means the resource simply does not exist.
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        } else {
            // Any other failure is logged and rethrown unchanged.
            throw LoggingUtility.logError(logger, e);
        }
    }
}
/**
 * The {@link FileSystemProvider} implementation backing the {@code azb:} scheme, mapping NIO file-system
 * operations onto Azure Blob Storage containers and blobs.
 * <p>
 * NOTE(review): this excerpt is truncated by extraction; several javadoc links and one method body
 * ({@code checkAccess}) are missing from the visible text and are flagged inline below.
 */
class AzureFileSystemProvider extends FileSystemProvider {
    // NOTE(review): convention would be static final; left as an instance field to preserve behavior.
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /** A helper for setting the HTTP properties when creating a directory. */
    public static final String CONTENT_TYPE = "Content-Type";

    /** A helper for setting the HTTP properties when creating a directory. */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /** A helper for setting the HTTP properties when creating a directory. */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /** A helper for setting the HTTP properties when creating a directory. */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /** A helper for setting the HTTP properties when creating a directory. */
    public static final String CONTENT_MD5 = "Content-MD5";

    /** A helper for setting the HTTP properties when creating a directory. */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query parameter key that carries the storage account name in an azb: URI.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // Maximum time to wait for a server-side blob copy to complete before giving up.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // account name -> open file system; at most one open file system per account.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb"}.
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI of the form
     * {@code "azb://?account=<account_name>"}. Once closed, a file system with the same identifier may be
     * reopened.
     *
     * @param uri URI reference
     * @param config a map of provider specific properties to configure the file system
     * @return a new file system
     * @throws IllegalArgumentException if the preconditions on {@code uri} aren't met, or {@code config}
     *         lacks required properties or contains an invalid value
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException if the file system has already been created
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);
        // NOTE(review): containsKey + put is not atomic; two concurrent callers could race past this
        // check. Verify whether callers serialize file-system creation before tightening.
        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }
        AzureFileSystem afs = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, afs);
        return afs;
    }

    /**
     * Returns an existing FileSystem created by this provider. Trying to retrieve a closed file system
     * throws {@link FileSystemNotFoundException}; once closed, a file system with the same identifier may
     * be reopened.
     *
     * @param uri URI reference
     * @return the file system
     * @throws IllegalArgumentException if the preconditions on {@code uri} aren't met
     * @throws FileSystemNotFoundException if no open file system matches the URI's account
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        if (!this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
        }
        return this.openFileSystems.get(accountName);
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem
     * that already exists.
     *
     * @param uri the URI to convert
     * @return the path identified by the URI
     * @throws IllegalArgumentException if the URI scheme does not identify this provider or other
     *         preconditions on {@code uri} do not hold
     * @throws FileSystemNotFoundException if the file system identified by the query does not exist
     * @throws SecurityException never
     */
    @Override
    public Path getPath(URI uri) {
        return getFileSystem(uri).getPath(uri.getPath());
    }

    /**
     * Unsupported. Use {@code newInputStream}/{@code newOutputStream} instead.
     *
     * @param path the path
     * @param set open options
     * @param fileAttributes attributes
     * @return never returns
     * @throws UnsupportedOperationException always
     */
    @Override
    public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set,
        FileAttribute<?>... fileAttributes) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Opens an {@link InputStream} to the given path.
     * <p>
     * The stream does not read or buffer the entire file; data is fetched in fixed multi-MB chunks to
     * avoid network thrashing on small reads. Mark and reset are supported. Only
     * {@link StandardOpenOption#READ} is accepted.
     *
     * @param path the path to the file to open
     * @param options options specifying how the file is opened; only READ is supported
     * @return a new input stream
     * @throws UnsupportedOperationException if any option other than READ is specified
     * @throws IOException if the path does not exist or points to a directory
     * @throws SecurityException never
     */
    @Override
    public InputStream newInputStream(Path path, OpenOption... options) throws IOException {
        if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Only the read option is supported."));
        }
        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());
        // Must resolve to an existing non-directory blob to be readable.
        if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) {
            throw LoggingUtility.logError(logger,
                new IOException("Path either does not exist or points to a directory."
                    + "Path must point to a file. Path: " + path.toString()));
        }
        return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath());
    }

    /**
     * Opens an {@link OutputStream} to the given path; the resulting file is stored as a block blob.
     * <p>
     * Supported options: CREATE, CREATE_NEW, WRITE, TRUNCATE_EXISTING. WRITE and TRUNCATE_EXISTING must
     * both be present (files are only ever overwritten completely, never updated in place). Data is
     * buffered and uploaded asynchronously in blocks; it is committed — and readable — only once the
     * stream is closed. Flush is a no-op for data transfer but surfaces any pending asynchronous write
     * errors.
     *
     * @param path the path to the file to open or create
     * @param options options specifying how the file is opened; defaults to CREATE + WRITE +
     *        TRUNCATE_EXISTING when empty
     * @return a new output stream
     * @throws IllegalArgumentException if WRITE and TRUNCATE_EXISTING are not both specified
     * @throws UnsupportedOperationException if an unsupported option is specified
     * @throws IOException if the target is a directory, the location requires a create option, or
     *         CREATE_NEW is specified and the file already exists
     * @throws SecurityException never
     */
    @Override
    public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
        // Default to overwrite-create semantics when the caller passes no options.
        if (options == null || options.length == 0) {
            options = new OpenOption[] { StandardOpenOption.CREATE, StandardOpenOption.WRITE,
                StandardOpenOption.TRUNCATE_EXISTING };
        }
        List<OpenOption> optionsList = Arrays.asList(options);
        List<OpenOption> supportedOptions = Arrays.asList(StandardOpenOption.CREATE_NEW, StandardOpenOption.CREATE,
            StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
        for (OpenOption option : optionsList) {
            // NOTE(review): this throw is not routed through LoggingUtility.logError like the others.
            if (!supportedOptions.contains(option)) {
                throw new UnsupportedOperationException("Unsupported option: " + option.toString());
            }
        }
        if (!optionsList.contains(StandardOpenOption.WRITE)
            || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
        }
        AzureResource resource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(resource.getPath());
        DirectoryStatus status = resource.checkDirStatus();
        if (DirectoryStatus.isDirectory(status)) {
            throw LoggingUtility.logError(logger,
                new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
            || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
            throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
                + "option. Path: " + path.toString()));
        }
        if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
                + "CREATE_NEW was specified. Path: " + path.toString()));
        }
        // Transfer tuning (block size, PutBlob threshold, concurrency) comes from the file system config.
        AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
        Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
        Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
        ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
            putBlobThreshold);
        // If-None-Match: * makes the commit fail if something was created between our check and the upload.
        BlobRequestConditions rq = null;
        if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
            rq = new BlobRequestConditions().setIfNoneMatch("*");
        }
        return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null,
            null, null, rq), resource.getPath());
    }

    /**
     * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory, filtered by
     * the given filter. Close the stream after iteration when not using try-with-resources. Filter errors
     * propagate from {@code hasNext}/{@code next}; an {@code IOException} surfaces as a
     * {@code DirectoryIteratorException}.
     *
     * @param path the path to the directory
     * @param filter the directory stream filter
     * @return a new and open {@code DirectoryStream} object
     * @throws IllegalArgumentException if {@code path} is not an {@link AzurePath}
     * @throws NotDirectoryException if the file is not a directory
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
        throws IOException {
        if (!(path instanceof AzurePath)) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
                + "subtypes of Path other than AzurePath"));
        }
        AzurePath.ensureFileSystemOpen(path);
        /*
        Ensure the path is a directory. Roots are always directories; an invalid root is caught when
        instantiating the stream below. Possible later optimization: reuse this list call as the stream's
        first page instead of listing twice.
         */
        if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
            throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
        }
        return new AzureDirectoryStream((AzurePath) path, filter);
    }

    /**
     * Creates a new directory at the specified path.
     * <p>
     * Directory existence has two levels. <i>Weak existence</i> (a "virtual directory") is the presence of
     * any blobs prefixed with the directory path; <i>strong existence</i> (a "concrete directory") is a
     * zero-length marker blob at the path with directory metadata. Directories created here strongly
     * exist. This method atomically checks for strong existence at the target and fails if found, but only
     * checks weak existence of the parent, and that parent check is not atomic with the creation — a
     * service-API limitation. Creating the child always ensures a virtual parent, so the child is never
     * left unreachable. A virtual directory at the target does not block creation, so a directory can be
     * "double created" (harmless: nothing is overwritten).
     * <p>
     * Standard HTTP content headers ({@code Content-Type}, {@code Content-Disposition},
     * {@code Content-Language}, {@code Content-Encoding}, {@code Content-MD5}, {@code Cache-Control}) are
     * extracted from the attribute list and set as blob headers; all other attributes become blob
     * metadata. Values are converted to {@code String} except {@code Content-MD5}, which expects
     * {@code byte[]}. If any content header is set, unset ones are cleared (service semantics).
     *
     * @param path the directory to create
     * @param fileAttributes an optional list of file attributes to set atomically when creating
     * @throws IllegalArgumentException if {@code path} is not an {@link AzurePath}
     * @throws UnsupportedOperationException if an attribute cannot be set atomically at creation
     * @throws FileAlreadyExistsException if a file of that name already exists
     * @throws IOException if the parent does not exist, or another I/O error occurs
     * @throws SecurityException never
     */
    @Override
    public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
        fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

        AzureResource azureResource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(azureResource.getPath());

        if (azureResource.checkParentDirectoryExists()) {
            try {
                // If-None-Match: * makes the marker-blob put fail atomically if anything already exists here.
                azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                    .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
            } catch (BlobStorageException e) {
                if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                    && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                    throw LoggingUtility.logError(logger,
                        new FileAlreadyExistsException(azureResource.getPath().toString()));
                } else {
                    throw LoggingUtility.logError(logger,
                        new IOException("An error occurred when creating the directory", e));
                }
            }
        } else {
            throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
                + azureResource.getPath().toString()));
        }
    }

    /**
     * Deletes the specified resource.
     * <p>
     * Not atomic with respect to other file-system operations: a file in use by another process can be
     * deleted, which makes open channels to it start failing rather than fail immediately. Root
     * directories cannot be deleted even when empty.
     *
     * @param path the path to the file to delete
     * @throws IllegalArgumentException if {@code path} is not an {@link AzurePath}
     * @throws NoSuchFileException if the file does not exist
     * @throws DirectoryNotEmptyException if the file is a non-empty directory
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    public void delete(Path path) throws IOException {
        AzureResource azureResource = new AzureResource(path);
        AzurePath.ensureFileSystemOpen(azureResource.getPath());

        DirectoryStatus dirStatus = azureResource.checkDirStatus();
        if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
            throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString()));
        }

        try {
            azureResource.getBlobClient().delete();
        } catch (BlobStorageException e) {
            // Blob may have been deleted between the status check and the delete call.
            if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
                throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
            }
            throw LoggingUtility.logError(logger, new IOException(e));
        }
    }

    /**
     * Copies the resource at the source location to the destination.
     * <p>
     * The validation checks are not atomic with the copy, but the copy itself is atomic: only a complete
     * copy is ever left at the destination. {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed (the
     * service always copies blob properties); the only other supported option is
     * {@link StandardCopyOption#REPLACE_EXISTING}. Neither source nor destination may be a root. The
     * destination's parent must at least weakly exist. Both virtual and concrete directories are supported
     * as source and destination, except that a virtual directory at the destination fails the copy, to
     * avoid overwriting a non-empty virtual directory with a file.
     *
     * @param source the path to the file to copy
     * @param destination the path to the target file
     * @param copyOptions options specifying how the copy should be done
     * @throws UnsupportedOperationException if an unsupported copy option is passed or COPY_ATTRIBUTES
     *         is missing
     * @throws FileAlreadyExistsException if the target exists and REPLACE_EXISTING was not specified
     * @throws DirectoryNotEmptyException if the target is a non-empty directory
     * @throws IOException if the destination's parent does not exist or the copy fails
     * @throws IllegalArgumentException if a path is not an {@link AzurePath}
     * @throws SecurityException never
     */
    @Override
    public void copy(Path source, Path destination, CopyOption... copyOptions) throws IOException {
        // Copying a path onto itself is a no-op.
        if (source.equals(destination)) {
            return;
        }

        boolean replaceExisting = false;
        List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions));
        if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) {
            throw LoggingUtility.logError(logger, new UnsupportedOperationException(
                "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy "
                    + "file attributes."));
        }
        optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES);
        if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) {
            replaceExisting = true;
            optionsList.remove(StandardCopyOption.REPLACE_EXISTING);
        }
        if (!optionsList.isEmpty()) {
            // NOTE(review): "StandareCopyOption" typo in this user-facing message; left byte-identical here.
            throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. "
                + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported."));
        }

        AzureResource sourceRes = new AzureResource(source);
        AzurePath.ensureFileSystemOpen(sourceRes.getPath());
        AzureResource destinationRes = new AzureResource(destination);
        AzurePath.ensureFileSystemOpen(destinationRes.getPath());

        DirectoryStatus destinationStatus = destinationRes.checkDirStatus();
        if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) {
            throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString()));
        }

        /*
        Set request conditions if we should not overwrite. We can error out here if we know something
        already exists, but we also create request conditions as a safeguard against overwriting something
        created between our check and the put.
         */
        BlobRequestConditions requestConditions = null;
        if (!replaceExisting) {
            if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
                throw LoggingUtility.logError(logger,
                    new FileAlreadyExistsException(destinationRes.getPath().toString()));
            }
            requestConditions = new BlobRequestConditions().setIfNoneMatch("*");
        }

        /*
        Check that the destination's parent exists. Only needed when nothing currently exists at the
        destination: if the destination exists, its parent at least weakly exists and we can skip a call.
         */
        if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)
            && !destinationRes.checkParentDirectoryExists()) {
            throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not "
                + "exist. The destination path is therefore invalid. Destination: "
                + destinationRes.getPath().toString()));
        }

        /*
        Optimization: attempt the copy first and only check for a virtual directory on a 404. Files and
        concrete directories then cost one request; virtual directories cost three (failed copy, status
        check, create directory). Checking status first would always cost two for every resource type.
         */
        try {
            SyncPoller<BlobCopyInfo, Void> pollResponse =
                destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null,
                    null, requestConditions, null);
            pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS));
        } catch (BlobStorageException e) {
            if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)
                && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) {
                // Source is a virtual directory; parent and paths were validated above, so put directly.
                destinationRes.putDirectoryBlob(requestConditions);
            } else {
                throw LoggingUtility.logError(logger, new IOException(e));
            }
        } catch (RuntimeException e) {
            throw LoggingUtility.logError(logger, new IOException(e));
        }
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @param path1 path
     * @param copyOptions options
     * @throws UnsupportedOperationException always
     */
    @Override
    public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @param path1 path
     * @throws UnsupportedOperationException always
     */
    @Override
    public boolean isSameFile(Path path, Path path1) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Always returns false as hidden files are not supported.
     *
     * @param path the path
     * @return false
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    public boolean isHidden(Path path) throws IOException {
        return false;
    }

    /**
     * Unsupported.
     *
     * @param path path
     * @return the file store where the file is stored
     * @throws UnsupportedOperationException always
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    public FileStore getFileStore(Path path) throws IOException {
        throw new UnsupportedOperationException();
    }

    /**
     * Checks the existence, and optionally the accessibility, of a file. May only be used to check
     * existence: client permissions cannot be determined, so any mode argument yields an
     * {@link UnsupportedOperationException}.
     *
     * @param path the path to the file to check
     * @param accessModes the access modes to check; may have zero elements
     * @throws NoSuchFileException if a file does not exist
     * @throws java.nio.file.AccessDeniedException if the requested access would be denied or cannot be
     *         determined
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    // NOTE(review): the checkAccess method body is missing from this excerpt — only its javadoc and
    // annotation remain here; the implementation appears elsewhere in this file's extraction.

    /**
     * Returns a file attribute view of a given type, or null if the type is not available. See
     * {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for details. Reading or
     * setting attributes on a virtual directory is not supported and throws an {@link IOException}.
     *
     * @param path the path to the file
     * @param type the Class object corresponding to the file attribute view
     * @param linkOptions ignored
     * @return a file attribute view of the specified type, or null if unavailable
     */
    @Override
    @SuppressWarnings("unchecked")
    public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) {
        /*
        No resource validation is necessary here; it happens when the view object makes network requests.
         */
        if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) {
            return (V) new AzureBasicFileAttributeView(path);
        } else if (type == AzureBlobFileAttributeView.class) {
            return (V) new AzureBlobFileAttributeView(path);
        } else {
            return null;
        }
    }

    /**
     * Reads a file's attributes as a bulk operation. See {@link AzureBasicFileAttributes} and
     * {@link AzureBlobFileAttributes}. Reading attributes on a virtual directory is not supported and
     * throws an {@link IOException}.
     *
     * @param path the path to the file
     * @param type the Class of the file attributes required to read
     * @param linkOptions ignored
     * @return the file attributes
     * @throws UnsupportedOperationException if attributes of the given type are not supported
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    @SuppressWarnings("unchecked")
    public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions)
        throws IOException {
        AzurePath.ensureFileSystemOpen(path);

        Class<? extends BasicFileAttributeView> view;
        if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) {
            view = AzureBasicFileAttributeView.class;
        } else if (type == AzureBlobFileAttributes.class) {
            view = AzureBlobFileAttributeView.class;
        } else {
            throw new UnsupportedOperationException();
        }

        /*
        Resource validation happens in the view's readAttributes; checking there ensures it always runs
        regardless of code path, without double-checking here.
         */
        return (A) getFileAttributeView(path, view, linkOptions).readAttributes();
    }

    /**
     * Reads a set of file attributes as a bulk operation. See {@link AzureBasicFileAttributes} and
     * {@link AzureBlobFileAttributes}. Reading attributes on a virtual directory is not supported and
     * throws an {@link IOException}.
     *
     * @param path the path to the file
     * @param attributes the attributes to read, optionally prefixed with a view name and ':'
     * @param linkOptions ignored
     * @return a map of attribute names to values; never empty
     * @throws UnsupportedOperationException if the attribute view is not supported
     * @throws IllegalArgumentException if no attributes are specified or an attribute is unrecognized
     * @throws IOException if an I/O error occurs
     * @throws SecurityException never
     */
    @Override
    public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions)
        throws IOException {
        if (attributes == null) {
            throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null."));
        }

        AzurePath.ensureFileSystemOpen(path);

        Map<String, Object> results = new HashMap<>();

        /*
        AzureBlobFileAttributes can do everything the basic attributes can and more, so one cached
        supplier map serves both views and avoids a second network call. Generify later if more attribute
        types are added.
         */
        Map<String, Supplier<Object>> attributeSuppliers = null;

        String viewType;
        String attributeList;
        String[] parts = attributes.split(":");
        if (parts.length > 2) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("Invalid format for attribute string: " + attributes));
        }
        if (parts.length == 1) {
            viewType = "basic";
            attributeList = attributes;
        } else {
            viewType = parts[0];
            attributeList = parts[1];
        }

        /*
        Our basic BasicFileAttributes implementation is named azureBasic for specificity, but the spec
        requires supporting "basic", so funnel it to azureBasic.
         */
        if (viewType.equals("basic")) {
            viewType = AzureBasicFileAttributeView.NAME;
        }
        if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Invalid attribute view: " + viewType));
        }

        for (String attributeName : attributeList.split(",")) {
            /*
            The blob view does the actual work (see above); when basic is requested, still validate that
            the attribute is available on a basic view.
             */
            if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
                if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName)
                    && !attributeName.equals("*")) {
                    throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                        + viewType + ". Attribute: " + attributeName));
                }
            }
            if (attributeSuppliers == null) {
                attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers(
                    this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions));
            }
            if (attributeName.equals("*")) {
                Set<String> attributesToAdd; // NOTE(review): declared but never used in the original.
                if (viewType.equals(AzureBasicFileAttributeView.NAME)) {
                    for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) {
                        results.put(attr, attributeSuppliers.get(attr).get());
                    }
                } else {
                    for (Map.Entry<String, Supplier<Object>> entry : attributeSuppliers.entrySet()) {
                        results.put(entry.getKey(), entry.getValue().get());
                    }
                }
            } else if (!attributeSuppliers.containsKey(attributeName)) {
                throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: "
                    + viewType + ". Attribute: " + attributeName));
            } else {
                results.put(attributeName, attributeSuppliers.get(attributeName).get());
            }
        }

        if (results.isEmpty()) {
            throw LoggingUtility.logError(logger,
                new IllegalArgumentException("No attributes were specified. Attributes: " + attributes));
        }

        return results;
    }

    /**
     * Sets the value of a file attribute. See {@link AzureBlobFileAttributeView}. Setting attributes on a
     * virtual directory is not supported and throws an {@link IOException}.
     *
     * @param path the path to the file
     * @param attributes the attribute to set
     * @param value the attribute value
     * @param linkOptions ignored
     * @throws UnsupportedOperationException if an attribute view is not available
     * @throws IllegalArgumentException if the attribute name is not specified or recognized, or the value
     *         is of the correct type but inappropriate
     * @throws ClassCastException if the attribute value is not of the expected type, or is a collection
     *         containing elements of an unexpected type
     * @throws IOException If an I/O error occurs.
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
class AzureFileSystemProvider extends FileSystemProvider {
    private final ClientLogger logger = new ClientLogger(AzureFileSystemProvider.class);

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_TYPE = "Content-Type";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_DISPOSITION = "Content-Disposition";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_LANGUAGE = "Content-Language";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_ENCODING = "Content-Encoding";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CONTENT_MD5 = "Content-MD5";

    /**
     * A helper for setting the HTTP properties when creating a directory.
     */
    public static final String CACHE_CONTROL = "Cache-Control";

    // Query-parameter key that carries the storage account name in a file-system URI.
    private static final String ACCOUNT_QUERY_KEY = "account";
    // How long a server-side copy is polled before giving up.
    private static final int COPY_TIMEOUT_SECONDS = 30;

    // Registry of open file systems, keyed by account name. One entry per account.
    private final ConcurrentMap<String, FileSystem> openFileSystems;

    /**
     * Creates an AzureFileSystemProvider.
     */
    public AzureFileSystemProvider() {
        this.openFileSystems = new ConcurrentHashMap<>();
    }

    /**
     * Returns the URI scheme that identifies this provider: {@code "azb".}
     *
     * @return {@code "azb"}
     */
    @Override
    public String getScheme() {
        return "azb";
    }

    /**
     * Constructs a new FileSystem object identified by a URI.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Once closed, a file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @param config A map of provider specific properties to configure the file system
     * @return a new file system.
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met, or the env parameter
     * does not contain properties required by the provider, or a property value is invalid.
     * @throws IOException If an I/O error occurs.
     * @throws SecurityException never
     * @throws FileSystemAlreadyExistsException If the file system has already been created.
     */
    @Override
    public FileSystem newFileSystem(URI uri, Map<String, ?> config) throws IOException {
        String accountName = extractAccountName(uri);

        // NOTE(review): containsKey followed by put is not atomic on the ConcurrentMap; two concurrent callers
        // could race here — confirm whether external synchronization is assumed.
        if (this.openFileSystems.containsKey(accountName)) {
            throw LoggingUtility.logError(this.logger, new FileSystemAlreadyExistsException("Name: " + accountName));
        }

        AzureFileSystem fileSystem = new AzureFileSystem(this, accountName, config);
        this.openFileSystems.put(accountName, fileSystem);
        return fileSystem;
    }

    /**
     * Returns an existing FileSystem created by this provider.
     * <p>
     * The format of a {@code URI} identifying a file system is {@code "azb://?account=<account_name>"}.
     * <p>
     * Trying to retrieve a closed file system will throw a {@link FileSystemNotFoundException}. Once closed, a
     * file system with the same identifier may be reopened.
     *
     * @param uri URI reference
     * @return the file system
     * @throws IllegalArgumentException If the pre-conditions for the uri parameter aren't met
     * @throws FileSystemNotFoundException If the file system already exists
     * @throws SecurityException never
     */
    @Override
    public FileSystem getFileSystem(URI uri) {
        String accountName = extractAccountName(uri);
        if (this.openFileSystems.containsKey(accountName)) {
            return this.openFileSystems.get(accountName);
        }
        throw LoggingUtility.logError(this.logger, new FileSystemNotFoundException("Name: " + accountName));
    }

    /**
     * Return a Path object by converting the given URI. The resulting Path is associated with a FileSystem that
     * already exists.
     *
     * @param uri The URI to convert
     * @return The path identified by the URI.
* @throws IllegalArgumentException If the URI scheme does not identify this provider or other preconditions on the * uri parameter do not hold * @throws FileSystemNotFoundException if the file system identified by the query does not exist * @throws SecurityException never * * @see */ @Override public Path getPath(URI uri) { return getFileSystem(uri).getPath(uri.getPath()); } /** * Unsupported. Use {@link * instead. * * @param path the Path * @param set open options * @param fileAttributes attributes * @return a new seekable byte channel * @throws UnsupportedOperationException Operation is not supported. * @throws IllegalArgumentException if the set contains an invalid combination of options * @throws FileAlreadyExistsException if a file of that name already exists and the CREATE_NEW option is specified * (optional specific exception) * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> set, FileAttribute<?>... fileAttributes) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Opens an {@link InputStream} to the given path. * <p> * The stream will not attempt to read or buffer the entire file. However, when fetching data, it will always * request the same size chunk of several MB to prevent network thrashing on small reads. Mark and reset are * supported. * <p> * Only {@link StandardOpenOption * * @param path the path to the file to open * @param options options specifying how the file is opened * @return a new input stream * @throws IllegalArgumentException if an invalid combination of options is specified * @throws UnsupportedOperationException if an unsupported option is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public InputStream newInputStream(Path path, OpenOption... 
options) throws IOException { if (options.length > 1 || (options.length > 0 && !options[0].equals(StandardOpenOption.READ))) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Only the read option is supported.")); } AzureResource resource = new AzureResource(path); AzurePath.ensureFileSystemOpen(resource.getPath()); if (!resource.checkDirStatus().equals(DirectoryStatus.NOT_A_DIRECTORY)) { throw LoggingUtility.logError(logger, new IOException("Path either does not exist or points to a directory." + "Path must point to a file. Path: " + path.toString())); } return new NioBlobInputStream(resource.getBlobClient().openInputStream(), resource.getPath()); } /** * Opens an {@link OutputStream} to the given path. The resulting file will be stored as a block blob. * <p> * The only supported options are {@link StandardOpenOption * {@link StandardOpenOption * {@link UnsupportedOperationException}. {@code WRITE} and {@code TRUNCATE_EXISTING} must be specified or an * {@link IllegalArgumentException} will be thrown. Hence, files cannot be updated, only overwritten completely. * <p> * This stream will not attempt to buffer the entire file, however some buffering will be done for potential * optimizations and to avoid network thrashing. Specifically, up to * {@link AzureFileSystem * exceeded, the data will be broken into chunks and sent in blocks, and writes will be buffered into sizes of * {@link AzureFileSystem * allocated is defined by {@link AzureFileSystem * the level of parallelism with which we may write and thus may affect write speeds as well. * <p> * The data is only committed when the steam is closed. Hence data cannot be read from the destination until the * stream is closed. When the close method returns, it is guaranteed that, barring any errors, the data is finalized * and available for reading. * <p> * Writing happens asynchronously. 
Bytes passed for writing are stored until either the threshold or block size are
 * met at which time they are sent to the service. When the write method returns, there is no guarantee about which
 * phase of this process the data is in other than it has been accepted and will be written. Again, closing will
 * guarantee that the data is written and available.
 * <p>
 * Flush is a no-op as regards data transfers, but it can be used to check the state of the stream for errors.
 * This can be a useful tool because writing happens asynchronously, and therefore an error from a previous write
 * may not otherwise be thrown unless the stream is flushed, closed, or written to again.
 *
 * @param path the path to the file to open or create
 * @param options options specifying how the file is opened
 * @return a new output stream
 * @throws IllegalArgumentException if an invalid combination of options is specified
 * @throws UnsupportedOperationException if an unsupported option is specified
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public OutputStream newOutputStream(Path path, OpenOption... options) throws IOException {
    // No options means the default create/overwrite behavior.
    if (options == null || options.length == 0) {
        options = new OpenOption[] {
            StandardOpenOption.CREATE, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING };
    }
    List<OpenOption> optionsList = Arrays.asList(options);

    // Only these four options are meaningful for a full-overwrite block-blob upload.
    List<OpenOption> supportedOptions = Arrays.asList(
        StandardOpenOption.CREATE_NEW,
        StandardOpenOption.CREATE,
        StandardOpenOption.WRITE,
        StandardOpenOption.TRUNCATE_EXISTING);
    for (OpenOption option : optionsList) {
        if (!supportedOptions.contains(option)) {
            throw LoggingUtility.logError(logger,
                new UnsupportedOperationException("Unsupported option: " + option.toString()));
        }
    }

    // Partial writes are not possible against a blob, so both WRITE and TRUNCATE_EXISTING are mandatory.
    if (!optionsList.contains(StandardOpenOption.WRITE)
        || !optionsList.contains(StandardOpenOption.TRUNCATE_EXISTING)) {
        throw LoggingUtility.logError(logger,
            new IllegalArgumentException("Write and TruncateExisting must be specified to open an OutputStream"));
    }

    AzureResource resource = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(resource.getPath());
    DirectoryStatus status = resource.checkDirStatus();

    // A directory (virtual or concrete) can never be the target of an OutputStream.
    if (DirectoryStatus.isDirectory(status)) {
        throw LoggingUtility.logError(logger,
            new IOException("Cannot open an OutputStream to a directory. Path: " + path.toString()));
    }
    // Nothing exists at the destination: a create option is required.
    if (status.equals(DirectoryStatus.DOES_NOT_EXIST) && !(optionsList.contains(StandardOpenOption.CREATE)
        || optionsList.contains(StandardOpenOption.CREATE_NEW))) {
        throw LoggingUtility.logError(logger, new IOException("Writing to an empty location requires a create "
            + "option. Path: " + path.toString()));
    }
    // A file already exists: CREATE_NEW demands exclusivity and must fail.
    if (status.equals(DirectoryStatus.NOT_A_DIRECTORY) && optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        throw LoggingUtility.logError(logger, new IOException("A file already exists at this location and "
            + "CREATE_NEW was specified. Path: " + path.toString()));
    }

    // Pull the transfer tuning (block size, put-blob threshold, parallelism) from the file system's configuration.
    AzureFileSystem fs = (AzureFileSystem) (path.getFileSystem());
    Integer blockSize = fs.getBlockSize() == null ? null : fs.getBlockSize().intValue();
    Integer putBlobThreshold = fs.getPutBlobThreshold() == null ? null : fs.getPutBlobThreshold().intValue();
    ParallelTransferOptions pto = new ParallelTransferOptions(blockSize, fs.getMaxConcurrencyPerRequest(), null,
        putBlobThreshold);

    // CREATE_NEW maps to an If-None-Match:* condition so the service itself enforces exclusive creation.
    BlobRequestConditions rq = null;
    if (optionsList.contains(StandardOpenOption.CREATE_NEW)) {
        rq = new BlobRequestConditions().setIfNoneMatch("*");
    }

    return new NioBlobOutputStream(resource.getBlobClient().getBlockBlobClient().getBlobOutputStream(pto, null, null,
        null, rq), resource.getPath());
}

/**
 * Returns an {@link AzureDirectoryStream} for iterating over the contents of a directory. The elements returned by
 * the directory stream's iterator are of type Path, each one representing an entry in the directory. The Path
 * objects are obtained as if by resolving the name of the directory entry against dir. The entries returned by the
 * iterator are filtered by the given filter.
 * <p>
 * When not using the try-with-resources construct, then directory stream's close method should be invoked after
 * iteration is completed so as to free any resources held for the open directory.
 * <p>
 * Where the filter terminates due to an uncaught error or runtime exception then it is propagated to the hasNext or
 * next method. Where an IOException is thrown, it results in the hasNext or next method throwing a
 * DirectoryIteratorException with the IOException as the cause.
 *
 * @param path the path to the directory
 * @param filter the directory stream filter
 * @return a new and open {@code DirectoryStream} object
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 * @throws NotDirectoryException if the file could not otherwise be opened because it is not a directory
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public DirectoryStream<Path> newDirectoryStream(Path path, DirectoryStream.Filter<? super Path> filter)
    throws IOException {
    if (!(path instanceof AzurePath)) {
        throw LoggingUtility.logError(logger, new IllegalArgumentException("This provider cannot operate on "
            + "subtypes of Path other than AzurePath"));
    }
    AzurePath.ensureFileSystemOpen(path);
    /*
    Ensure the path is a directory. Note that roots are always directories. The case of an invalid root will be
    caught in instantiating the stream below.

    Possible optimization later is to save the result of the list call to use as the first list call inside the
    stream rather than a list call for checking the status and a list call for listing.
    */
    if (!((AzurePath) path).isRoot() && !(new AzureResource(path).checkDirectoryExists())) {
        throw LoggingUtility.logError(logger, new NotDirectoryException(path.toString()));
    }
    return new AzureDirectoryStream((AzurePath) path, filter);
}

/**
 * Creates a new directory at the specified path.
 * <p>
 * The existence of a directory in the {@code AzureFileSystem} is defined on two levels. <i>Weak existence</i> is
 * defined by the presence of a non-zero number of blobs prefixed with the directory's path. This concept is also
 * known as a <i>virtual directory</i> and enables the file system to work with containers that were pre-loaded
 * with data by another source but need to be accessed by this file system. <i>Strong existence</i> is defined as
 * the presence of an actual storage resource at the given path, which in the case of directories, is a zero-length
 * blob whose name is the directory path with a particular metadata field indicating the blob's status as a
 * directory. This is also known as a <i>concrete directory</i>. Directories created by this file system will
 * strongly exist. Operations targeting directories themselves as the object (e.g. setting properties) will target
 * marker blobs underlying concrete directories. Other operations (e.g. listing) will operate on the blob-name
 * prefix.
 * <p>
 * This method fulfills the nio contract of: "The check for the existence of the file and the creation of the
 * directory if it does not exist are a single operation that is atomic with respect to all other filesystem
 * activities that might affect the directory." More specifically, this method will atomically check for <i>strong
 * existence</i> of another file or directory at the given path and fail if one is present. On the other hand, we
 * only check for <i>weak existence</i> of the parent to determine if the given path is valid. Additionally, the
 * action of checking whether the parent exists, is <i>not</i> atomic with the creation of the directory. Note that
 * while it is possible that the parent may be deleted between when the parent is determined to exist and the
 * creation of the child, the creation of the child will always ensure the existence of a virtual parent, so the
 * child will never be left floating and unreachable. The different checks on parent and child is due to limitations
 * in the Storage service API.
 * <p>
 * There may be some unintuitive behavior when working with directories in this file system, particularly virtual
 * directories (usually those not created by this file system). A virtual directory will disappear as soon as all
 * its children have been deleted. Furthermore, if a directory with the given path weakly exists at the time of
 * calling this method, this method will still return success and create a concrete directory at the target
 * location. In other words, it is possible to "double create" a directory if it first weakly exists and then is
 * strongly created. This is both because it is impossible to atomically check if a virtual directory exists while
 * creating a concrete directory and because such behavior will have minimal side effects--no files will be
 * overwritten and the directory will still be available for writing as intended, though it may not be empty. This
 * is not a complete list of such unintuitive behavior.
 * <p>
 * This method will attempt to extract standard HTTP content headers from the list of file attributes to set them
 * as blob headers. All other attributes will be set as blob metadata. The value of every attribute will be
 * converted to a {@code String} with the exception of the Content-MD5 attribute which expects a {@code byte[]}.
 * When extracting the content headers, the following strings will be used for comparison (constants for these
 * values can be found on this type):
 * <ul>
 * <li>{@code Content-Type}</li>
 * <li>{@code Content-Disposition}</li>
 * <li>{@code Content-Language}</li>
 * <li>{@code Content-Encoding}</li>
 * <li>{@code Content-MD5}</li>
 * <li>{@code Cache-Control}</li>
 * </ul>
 * Note that these properties also have a particular semantic in that if one is specified, all are updated. In other
 * words, if any of the above is set, all those that are not set will be cleared. See the Azure Docs on setting blob
 * properties and metadata for more information.
 *
 * @param path the directory to create
 * @param fileAttributes an optional list of file attributes to set atomically when creating the directory
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 * @throws UnsupportedOperationException if the array contains an attribute that cannot be set atomically when
 * creating the directory
 * @throws FileAlreadyExistsException if a directory could not otherwise be created because a file of that name
 * already exists
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public void createDirectory(Path path, FileAttribute<?>... fileAttributes) throws IOException {
    // Normalize a null varargs array to empty so the attribute list below is always valid.
    fileAttributes = fileAttributes == null ? new FileAttribute<?>[0] : fileAttributes;

    AzureResource azureResource = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(azureResource.getPath());

    // Only weak (virtual) existence of the parent is required; see the javadoc above for why.
    if (azureResource.checkParentDirectoryExists()) {
        try {
            // If-None-Match:* makes the creation atomic: the service rejects it if a blob already exists there.
            azureResource.setFileAttributes(Arrays.asList(fileAttributes))
                .putDirectoryBlob(new BlobRequestConditions().setIfNoneMatch("*"));
        } catch (BlobStorageException e) {
            // A 409/BlobAlreadyExists from the precondition above maps to the nio FileAlreadyExistsException.
            if (e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT
                && e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) {
                throw LoggingUtility.logError(logger,
                    new FileAlreadyExistsException(azureResource.getPath().toString()));
            } else {
                throw LoggingUtility.logError(logger,
                    new IOException("An error occurred when creating the directory", e));
            }
        }
    } else {
        throw LoggingUtility.logError(logger, new IOException("Parent directory does not exist for path: "
            + azureResource.getPath().toString()));
    }
}

/**
 * Deletes the specified resource.
 * <p>
 * This method is not atomic with respect to other file system operations. It is possible to delete a file in use by
 * another process, and doing so will not immediately invalidate any channels open to that file--they will simply
 * start to fail. Root directories cannot be deleted even when empty.
 *
 * @param path the path to the file to delete
 * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}.
 * @throws NoSuchFileException if the file does not exist
 * @throws DirectoryNotEmptyException if the file is a directory and could not otherwise be deleted because the
 * directory is not empty
 * @throws IOException If an I/O error occurs.
 * @throws SecurityException never
 */
@Override
public void delete(Path path) throws IOException {
    AzureResource azureResource = new AzureResource(path);
    AzurePath.ensureFileSystemOpen(azureResource.getPath());

    // Validate the target up front: it must exist and, if a directory, must be empty.
    DirectoryStatus dirStatus = azureResource.checkDirStatus();
    if (dirStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) {
        throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
    }
    if (dirStatus.equals(DirectoryStatus.NOT_EMPTY)) {
        throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(path.toString()));
    }

    try {
        azureResource.getBlobClient().delete();
    } catch (BlobStorageException e) {
        // The blob may have been deleted between the status check and the delete call.
        if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) {
            throw LoggingUtility.logError(logger, new NoSuchFileException(path.toString()));
        }
        throw LoggingUtility.logError(logger, new IOException(e));
    }
}

/**
 * Copies the resource at the source location to the destination.
 * <p>
 * This method is not atomic with respect to other file system operations. More specifically, the checks necessary
 * to validate the inputs and state of the file system are not atomic with the actual copying of data. If the copy
 * is triggered, the copy itself is atomic and only a complete copy will ever be left at the destination.
 * <p>
 * In addition to those in the docs for {@link FileSystemProvider#copy(Path, Path, CopyOption...)}, this method has
 * the following requirements for successful completion. {@link StandardCopyOption#COPY_ATTRIBUTES} must be passed
 * as it is impossible not to copy blob properties; if this option is not passed, an
 * {@link UnsupportedOperationException} will be thrown. Neither the source nor the destination can be a root
 * directory; if either is a root directory, an {@link IllegalArgumentException} will be thrown. The parent
 * directory of the destination must at least weakly exist; if it does not, an {@link IOException} will be thrown.
 * The only supported option other than {@link StandardCopyOption#COPY_ATTRIBUTES} is
 * {@link StandardCopyOption#REPLACE_EXISTING}; the presence of any other option will result in an
 * {@link UnsupportedOperationException}.
* <p> * This method supports both virtual and concrete directories as both the source and destination. Unlike when * creating a directory, the existence of a virtual directory at the destination will cause this operation to fail. * This is in order to prevent the possibility of overwriting a non-empty virtual directory with a file. Still, as * mentioned above, this check is not atomic with the creation of the resultant directory. * * @param source the path to the file to copy * @param destination the path to the target file * @param copyOptions specifying how the copy should be done * @throws UnsupportedOperationException if the array contains a copy option that is not supported * @throws FileAlreadyExistsException if the target file exists but cannot be replaced because the REPLACE_EXISTING * option is not specified * @throws DirectoryNotEmptyException the REPLACE_EXISTING option is specified but the file cannot be replaced * because it is a non-empty directory * @throws IOException If an I/O error occurs. * @throws IllegalArgumentException If the path type is not an instance of {@link AzurePath}. * @throws SecurityException never * @see */ @Override public void copy(Path source, Path destination, CopyOption... 
copyOptions) throws IOException { if (source.equals(destination)) { return; } boolean replaceExisting = false; List<CopyOption> optionsList = new ArrayList<>(Arrays.asList(copyOptions)); if (!optionsList.contains(StandardCopyOption.COPY_ATTRIBUTES)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException( "StandardCopyOption.COPY_ATTRIBUTES must be specified as the service will always copy " + "file attributes.")); } optionsList.remove(StandardCopyOption.COPY_ATTRIBUTES); if (optionsList.contains(StandardCopyOption.REPLACE_EXISTING)) { replaceExisting = true; optionsList.remove(StandardCopyOption.REPLACE_EXISTING); } if (!optionsList.isEmpty()) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Unsupported copy option found. " + "Only StandardCopyOption.COPY_ATTRIBUTES and StandareCopyOption.REPLACE_EXISTING are supported.")); } AzureResource sourceRes = new AzureResource(source); AzurePath.ensureFileSystemOpen(sourceRes.getPath()); AzureResource destinationRes = new AzureResource(destination); AzurePath.ensureFileSystemOpen(destinationRes.getPath()); DirectoryStatus destinationStatus = destinationRes.checkDirStatus(); if (destinationStatus.equals(DirectoryStatus.NOT_EMPTY)) { throw LoggingUtility.logError(logger, new DirectoryNotEmptyException(destination.toString())); } /* Set request conditions if we should not overwrite. We can error out here if we know something already exists, but we will also create request conditions as a safeguard against overwriting something that was created between our check and put. */ BlobRequestConditions requestConditions = null; if (!replaceExisting) { if (!destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST)) { throw LoggingUtility.logError(logger, new FileAlreadyExistsException(destinationRes.getPath().toString())); } requestConditions = new BlobRequestConditions().setIfNoneMatch("*"); } /* More path validation Check that the parent for the destination exists. 
We only need to perform this check if there is nothing currently at the destination, for if the destination exists, its parent at least weakly exists and we can skip a service call. */ if (destinationStatus.equals(DirectoryStatus.DOES_NOT_EXIST) && !destinationRes.checkParentDirectoryExists()) { throw LoggingUtility.logError(logger, new IOException("Parent directory of destination location does not " + "exist. The destination path is therefore invalid. Destination: " + destinationRes.getPath().toString())); } /* Try to copy the resource at the source path. There is an optimization here where we try to do the copy first and only check for a virtual directory if there's a 404. In the cases of files and concrete directories, this only requires one request. For virtual directories, however, this requires three requests: failed copy, check status, create directory. Depending on customer scenarios and how many virtual directories they copy, it could be better to check the directory status first and then do a copy or createDir, which would always be two requests for all resource types. */ try { SyncPoller<BlobCopyInfo, Void> pollResponse = destinationRes.getBlobClient().beginCopy(sourceRes.getBlobClient().getBlobUrl(), null, null, null, null, requestConditions, null); pollResponse.waitForCompletion(Duration.ofSeconds(COPY_TIMEOUT_SECONDS)); } catch (BlobStorageException e) { if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND) && !sourceRes.checkDirStatus().equals(DirectoryStatus.DOES_NOT_EXIST)) { /* We already checked that the parent exists and validated the paths above, so we can put the blob directly. */ destinationRes.putDirectoryBlob(requestConditions); } else { throw LoggingUtility.logError(logger, new IOException(e)); } } catch (RuntimeException e) { throw LoggingUtility.logError(logger, new IOException(e)); } } /*int checkRootDirStatus(BlobContainerClient rootClient) { }*/ /** * Unsupported. 
* * @param path path * @param path1 path * @param copyOptions options * @throws UnsupportedOperationException Operation is not supported. */ @Override public void move(Path path, Path path1, CopyOption... copyOptions) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Unsupported. * * @param path path * @param path1 path * @throws UnsupportedOperationException Operation is not supported. */ @Override public boolean isSameFile(Path path, Path path1) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Always returns false as hidden files are not supported. * * @param path the path * @return false * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public boolean isHidden(Path path) throws IOException { return false; } /** * Unsupported. * * @param path path * @return the file store where the file is stored. * @throws UnsupportedOperationException Operation is not supported. * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public FileStore getFileStore(Path path) throws IOException { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /** * Checks the existence, and optionally the accessibility, of a file. * <p> * This method may only be used to check the existence of a file. It is not possible to determine the permissions * granted to a given client, so if any mode argument is specified, an {@link UnsupportedOperationException} will be * thrown. * * @param path the path to the file to check * @param accessModes The access modes to check; may have zero elements * @throws NoSuchFileException if a file does not exist * @throws java.nio.file.AccessDeniedException the requested access would be denied or the access cannot be * determined because the Java virtual machine has insufficient privileges or other reasons * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override /** * Returns a file attribute view of a given type. * <p> * See {@link AzureBasicFileAttributeView} and {@link AzureBlobFileAttributeView} for more information. * <p> * Reading or setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class object corresponding to the file attribute view * @param linkOptions ignored * @return a file attribute view of the specified type, or null if the attribute view type is not available */ @Override @SuppressWarnings("unchecked") public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... linkOptions) { /* No resource validation is necessary here. That can happen at the time of making a network requests internal to the view object. */ if (type == BasicFileAttributeView.class || type == AzureBasicFileAttributeView.class) { return (V) new AzureBasicFileAttributeView(path); } else if (type == AzureBlobFileAttributeView.class) { return (V) new AzureBlobFileAttributeView(path); } else { return null; } } /** * Reads a file's attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param type the Class of the file attributes required to read * @param linkOptions ignored * @return the file attributes * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override @SuppressWarnings("unchecked") public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); Class<? 
extends BasicFileAttributeView> view; if (type == BasicFileAttributes.class || type == AzureBasicFileAttributes.class) { view = AzureBasicFileAttributeView.class; } else if (type == AzureBlobFileAttributes.class) { view = AzureBlobFileAttributeView.class; } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException()); } /* Resource validation will happen in readAttributes of the view. We don't want to double check, and checking internal to the view ensures it is always checked no matter which code path is taken. */ return (A) getFileAttributeView(path, view, linkOptions).readAttributes(); } /** * Reads a set of file attributes as a bulk operation. * <p> * See {@link AzureBasicFileAttributes} and {@link AzureBlobFileAttributes} for more information. * <p> * Reading attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attributes to read * @param linkOptions ignored * @return a map of the attributes returned; may be empty. The map's keys are the attribute names, its values are * the attribute values * @throws UnsupportedOperationException if an attributes of the given type are not supported * @throws IllegalArgumentException if no attributes are specified or an unrecognized attributes is specified * @throws IOException If an I/O error occurs. * @throws SecurityException never */ @Override public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... linkOptions) throws IOException { if (attributes == null) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Attribute string cannot be null.")); } AzurePath.ensureFileSystemOpen(path); Map<String, Object> results = new HashMap<>(); /* AzureBlobFileAttributes can do everything the basic attributes can do and more. There's no need to instantiate one of each if both are specified somewhere in the list as that will waste a network call. 
This can be generified later if we need to add more attribute types, but for now we can stick to just caching the supplier for a single attributes object. */ Map<String, Supplier<Object>> attributeSuppliers = null; String viewType; String attributeList; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeList = attributes; } else { viewType = parts[0]; attributeList = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (!viewType.equals(AzureBasicFileAttributeView.NAME) && !viewType.equals(AzureBlobFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } for (String attributeName : attributeList.split(",")) { /* We rely on the azureBlobFAV to actually do the work here as mentioned above, but if basic is specified, we should at least validate that the attribute is available on a basic view. */ if (viewType.equals(AzureBasicFileAttributeView.NAME)) { if (!AzureBasicFileAttributes.ATTRIBUTE_STRINGS.contains(attributeName) && !attributeName.equals("*")) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } } if (attributeSuppliers == null) { attributeSuppliers = AzureBlobFileAttributes.getAttributeSuppliers( this.readAttributes(path, AzureBlobFileAttributes.class, linkOptions)); } if (attributeName.equals("*")) { Set<String> attributesToAdd; if (viewType.equals(AzureBasicFileAttributeView.NAME)) { for (String attr : AzureBasicFileAttributes.ATTRIBUTE_STRINGS) { results.put(attr, attributeSuppliers.get(attr).get()); } } else { for (Map.Entry<String, Supplier<Object>> entry: attributeSuppliers.entrySet()) { results.put(entry.getKey(), entry.getValue().get()); } } } else if (!attributeSuppliers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else { results.put(attributeName, attributeSuppliers.get(attributeName).get()); } } if (results.isEmpty()) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No attributes were specified. Attributes: " + attributes)); } return results; } /** * Sets the value of a file attribute. * <p> * See {@link AzureBlobFileAttributeView} for more information. * <p> * Setting attributes on a virtual directory is not supported and will throw an {@link IOException}. See * {@link * * @param path the path to the file * @param attributes the attribute to set * @param value the attribute value * @param linkOptions ignored * @throws UnsupportedOperationException if an attribute view is not available * @throws IllegalArgumentException if the attribute name is not specified, or is not recognized, or the attribute * value is of the correct type but has an inappropriate value * @throws ClassCastException If the attribute value is not of the expected type or is a collection containing * elements that are not of the expected type * @throws IOException If an I/O error occurs. 
* @throws SecurityException never */ @Override public void setAttribute(Path path, String attributes, Object value, LinkOption... linkOptions) throws IOException { AzurePath.ensureFileSystemOpen(path); String viewType; String attributeName; String[] parts = attributes.split(":"); if (parts.length > 2) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid format for attribute string: " + attributes)); } if (parts.length == 1) { viewType = "basic"; attributeName = attributes; } else { viewType = parts[0]; attributeName = parts[1]; } /* For specificity, our basic implementation of BasicFileAttributes uses the name azureBasic. However, the docs state that "basic" must be supported, so we funnel to azureBasic. */ if (viewType.equals("basic")) { viewType = AzureBasicFileAttributeView.NAME; } if (viewType.equals(AzureBasicFileAttributeView.NAME)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". Attribute: " + attributeName)); } else if (viewType.equals(AzureBlobFileAttributeView.NAME)) { Map<String, Consumer<Object>> attributeConsumers = AzureBlobFileAttributeView.setAttributeConsumers( this.getFileAttributeView(path, AzureBlobFileAttributeView.class, linkOptions)); if (!attributeConsumers.containsKey(attributeName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("Invalid attribute. View: " + viewType + ". 
Attribute: " + attributeName)); } try { attributeConsumers.get(attributeName).accept(value); } catch (UncheckedIOException e) { if (e.getMessage().equals(AzureBlobFileAttributeView.ATTR_CONSUMER_ERROR)) { throw LoggingUtility.logError(logger, e.getCause()); } } } else { throw LoggingUtility.logError(logger, new UnsupportedOperationException("Invalid attribute view: " + viewType)); } } void closeFileSystem(String fileSystemName) { this.openFileSystems.remove(fileSystemName); } private String extractAccountName(URI uri) { if (!uri.getScheme().equals(this.getScheme())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI scheme does not match this provider")); } if (CoreUtils.isNullOrEmpty(uri.getQuery())) { throw LoggingUtility.logError(this.logger, new IllegalArgumentException("URI does not contain a query " + "component. FileSystems require a URI of the format \"azb: } String accountName = Flux.fromArray(uri.getQuery().split("&")) .filter(s -> s.startsWith(ACCOUNT_QUERY_KEY + "=")) .switchIfEmpty(Mono.error(LoggingUtility.logError(this.logger, new IllegalArgumentException( "URI does not contain an \"" + ACCOUNT_QUERY_KEY + "=\" parameter. FileSystems require a URI " + "of the format \"azb: .map(s -> s.substring(ACCOUNT_QUERY_KEY.length() + 1)) .blockLast(); if (CoreUtils.isNullOrEmpty(accountName)) { throw LoggingUtility.logError(logger, new IllegalArgumentException("No account name provided in URI" + " query.")); } return accountName; } }
Please update readme and changelog about these.
public void validateUserGroupProperties() { if (authorization.size() > 0 || isResourceServer()) { return; } if (this.sessionStateless) { if (allowedGroupsConfigured()) { LOGGER.warn("Group names are not supported if you set 'sessionSateless' to 'true'."); } } else if (!allowedGroupsConfigured()) { throw new IllegalArgumentException("One of the User Group Properties must be populated. " + "Please populate azure.activedirectory.user-group.allowed-groups"); } }
return;
public void validateUserGroupProperties() { if (authorization.size() > 0 || isResourceServer()) { return; } if (this.sessionStateless) { if (allowedGroupsConfigured()) { LOGGER.warn("Group names are not supported if you set 'sessionSateless' to 'true'."); } } else if (!allowedGroupsConfigured()) { throw new IllegalArgumentException("One of the User Group Properties must be populated. " + "Please populate azure.activedirectory.user-group.allowed-groups"); } }
class UserGroupProperties { /** * Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph * API Call. */ private List<String> allowedGroups = new ArrayList<>(); public List<String> getAllowedGroups() { return allowedGroups; } public void setAllowedGroups(List<String> allowedGroups) { this.allowedGroups = allowedGroups; } }
class UserGroupProperties { /** * Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph * API Call. */ private List<String> allowedGroups = new ArrayList<>(); public List<String> getAllowedGroups() { return allowedGroups; } public void setAllowedGroups(List<String> allowedGroups) { this.allowedGroups = allowedGroups; } }
due to the PUT is `current` https://github.com/azure/azure-rest-api-specs/blob/216e7ae3aafefd2ea16a5b64a49281d21da6cb1a/specification/synapse/resource-manager/Microsoft.Synapse/preview/2019-06-01-preview/sqlPool.json#L1745
protected Observable<SensitivityLabelInner> getInnerAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.getAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, SensitivityLabelSource.CURRENT); }
return client.getAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, SensitivityLabelSource.CURRENT);
protected Observable<SensitivityLabelInner> getInnerAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.getAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.sensitivityLabelSource); }
class SensitivityLabelsImpl extends CreatableUpdatableImpl<SensitivityLabels, SensitivityLabelInner, SensitivityLabelsImpl> implements SensitivityLabels, SensitivityLabels.Definition, SensitivityLabels.Update { private final SynapseManager manager; private String resourceGroupName; private String workspaceName; private String sqlPoolName; private String schemaName; private String tableName; private String columnName; SensitivityLabelsImpl(String name, SynapseManager manager) { super(name, new SensitivityLabelInner()); this.manager = manager; this.columnName = name; } SensitivityLabelsImpl(SensitivityLabelInner inner, SynapseManager manager) { super(inner.name(), inner); this.manager = manager; this.columnName = inner.name(); this.resourceGroupName = IdParsingUtils.getValueFromIdByName(inner.id(), "resourceGroups"); this.workspaceName = IdParsingUtils.getValueFromIdByName(inner.id(), "workspaces"); this.sqlPoolName = IdParsingUtils.getValueFromIdByName(inner.id(), "sqlPools"); this.schemaName = IdParsingUtils.getValueFromIdByName(inner.id(), "schemas"); this.tableName = IdParsingUtils.getValueFromIdByName(inner.id(), "tables"); this.columnName = IdParsingUtils.getValueFromIdByName(inner.id(), "columns"); } @Override public SynapseManager manager() { return this.manager; } @Override public Observable<SensitivityLabels> createResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } @Override public Observable<SensitivityLabels> updateResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } 
@Override @Override public boolean isInCreateMode() { return this.inner().id() == null; } @Override public String id() { return this.inner().id(); } @Override public String informationType() { return this.inner().informationType(); } @Override public String informationTypeId() { return this.inner().informationTypeId(); } @Override public Boolean isDisabled() { return this.inner().isDisabled(); } @Override public String labelId() { return this.inner().labelId(); } @Override public String labelName() { return this.inner().labelName(); } @Override public String name() { return this.inner().name(); } @Override public String type() { return this.inner().type(); } @Override public SensitivityLabelsImpl withExistingColumn(String resourceGroupName, String workspaceName, String sqlPoolName, String schemaName, String tableName, String columnName) { this.resourceGroupName = resourceGroupName; this.workspaceName = workspaceName; this.sqlPoolName = sqlPoolName; this.schemaName = schemaName; this.tableName = tableName; this.columnName = columnName; return this; } @Override public SensitivityLabelsImpl withInformationType(String informationType) { this.inner().withInformationType(informationType); return this; } @Override public SensitivityLabelsImpl withInformationTypeId(String informationTypeId) { this.inner().withInformationTypeId(informationTypeId); return this; } @Override public SensitivityLabelsImpl withLabelId(String labelId) { this.inner().withLabelId(labelId); return this; } @Override public SensitivityLabelsImpl withLabelName(String labelName) { this.inner().withLabelName(labelName); return this; } }
class SensitivityLabelsImpl extends CreatableUpdatableImpl<SensitivityLabels, SensitivityLabelInner, SensitivityLabelsImpl> implements SensitivityLabels, SensitivityLabels.Definition, SensitivityLabels.Update { private final SynapseManager manager; private String resourceGroupName; private String workspaceName; private String sqlPoolName; private String schemaName; private String tableName; private String columnName; private SensitivityLabelSource sensitivityLabelSource; SensitivityLabelsImpl(String name, SynapseManager manager) { super(name, new SensitivityLabelInner()); this.manager = manager; this.columnName = name; } SensitivityLabelsImpl(SensitivityLabelInner inner, SynapseManager manager) { super(inner.name(), inner); this.manager = manager; this.columnName = inner.name(); this.resourceGroupName = IdParsingUtils.getValueFromIdByName(inner.id(), "resourceGroups"); this.workspaceName = IdParsingUtils.getValueFromIdByName(inner.id(), "workspaces"); this.sqlPoolName = IdParsingUtils.getValueFromIdByName(inner.id(), "sqlPools"); this.schemaName = IdParsingUtils.getValueFromIdByName(inner.id(), "schemas"); this.tableName = IdParsingUtils.getValueFromIdByName(inner.id(), "tables"); this.columnName = IdParsingUtils.getValueFromIdByName(inner.id(), "columns"); this.sensitivityLabelSource = SensitivityLabelSource.fromString(IdParsingUtils.getValueFromIdByName(inner.id(), "sensitivityLabels")); } @Override public SynapseManager manager() { return this.manager; } @Override public Observable<SensitivityLabels> createResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } @Override public Observable<SensitivityLabels> updateResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); 
return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } @Override @Override public boolean isInCreateMode() { return this.inner().id() == null; } @Override public String id() { return this.inner().id(); } @Override public String informationType() { return this.inner().informationType(); } @Override public String informationTypeId() { return this.inner().informationTypeId(); } @Override public Boolean isDisabled() { return this.inner().isDisabled(); } @Override public String labelId() { return this.inner().labelId(); } @Override public String labelName() { return this.inner().labelName(); } @Override public String name() { return this.inner().name(); } @Override public String type() { return this.inner().type(); } @Override public SensitivityLabelsImpl withExistingColumn(String resourceGroupName, String workspaceName, String sqlPoolName, String schemaName, String tableName, String columnName) { this.resourceGroupName = resourceGroupName; this.workspaceName = workspaceName; this.sqlPoolName = sqlPoolName; this.schemaName = schemaName; this.tableName = tableName; this.columnName = columnName; return this; } @Override public SensitivityLabelsImpl withInformationType(String informationType) { this.inner().withInformationType(informationType); return this; } @Override public SensitivityLabelsImpl withInformationTypeId(String informationTypeId) { this.inner().withInformationTypeId(informationTypeId); return this; } @Override public SensitivityLabelsImpl withLabelId(String labelId) { this.inner().withLabelId(labelId); return this; } @Override public SensitivityLabelsImpl withLabelName(String labelName) { this.inner().withLabelName(labelName); return this; } }
changed, parsed from id
protected Observable<SensitivityLabelInner> getInnerAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.getAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, SensitivityLabelSource.CURRENT); }
return client.getAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, SensitivityLabelSource.CURRENT);
protected Observable<SensitivityLabelInner> getInnerAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.getAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.sensitivityLabelSource); }
class SensitivityLabelsImpl extends CreatableUpdatableImpl<SensitivityLabels, SensitivityLabelInner, SensitivityLabelsImpl> implements SensitivityLabels, SensitivityLabels.Definition, SensitivityLabels.Update { private final SynapseManager manager; private String resourceGroupName; private String workspaceName; private String sqlPoolName; private String schemaName; private String tableName; private String columnName; SensitivityLabelsImpl(String name, SynapseManager manager) { super(name, new SensitivityLabelInner()); this.manager = manager; this.columnName = name; } SensitivityLabelsImpl(SensitivityLabelInner inner, SynapseManager manager) { super(inner.name(), inner); this.manager = manager; this.columnName = inner.name(); this.resourceGroupName = IdParsingUtils.getValueFromIdByName(inner.id(), "resourceGroups"); this.workspaceName = IdParsingUtils.getValueFromIdByName(inner.id(), "workspaces"); this.sqlPoolName = IdParsingUtils.getValueFromIdByName(inner.id(), "sqlPools"); this.schemaName = IdParsingUtils.getValueFromIdByName(inner.id(), "schemas"); this.tableName = IdParsingUtils.getValueFromIdByName(inner.id(), "tables"); this.columnName = IdParsingUtils.getValueFromIdByName(inner.id(), "columns"); } @Override public SynapseManager manager() { return this.manager; } @Override public Observable<SensitivityLabels> createResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } @Override public Observable<SensitivityLabels> updateResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } 
@Override @Override public boolean isInCreateMode() { return this.inner().id() == null; } @Override public String id() { return this.inner().id(); } @Override public String informationType() { return this.inner().informationType(); } @Override public String informationTypeId() { return this.inner().informationTypeId(); } @Override public Boolean isDisabled() { return this.inner().isDisabled(); } @Override public String labelId() { return this.inner().labelId(); } @Override public String labelName() { return this.inner().labelName(); } @Override public String name() { return this.inner().name(); } @Override public String type() { return this.inner().type(); } @Override public SensitivityLabelsImpl withExistingColumn(String resourceGroupName, String workspaceName, String sqlPoolName, String schemaName, String tableName, String columnName) { this.resourceGroupName = resourceGroupName; this.workspaceName = workspaceName; this.sqlPoolName = sqlPoolName; this.schemaName = schemaName; this.tableName = tableName; this.columnName = columnName; return this; } @Override public SensitivityLabelsImpl withInformationType(String informationType) { this.inner().withInformationType(informationType); return this; } @Override public SensitivityLabelsImpl withInformationTypeId(String informationTypeId) { this.inner().withInformationTypeId(informationTypeId); return this; } @Override public SensitivityLabelsImpl withLabelId(String labelId) { this.inner().withLabelId(labelId); return this; } @Override public SensitivityLabelsImpl withLabelName(String labelName) { this.inner().withLabelName(labelName); return this; } }
class SensitivityLabelsImpl extends CreatableUpdatableImpl<SensitivityLabels, SensitivityLabelInner, SensitivityLabelsImpl> implements SensitivityLabels, SensitivityLabels.Definition, SensitivityLabels.Update { private final SynapseManager manager; private String resourceGroupName; private String workspaceName; private String sqlPoolName; private String schemaName; private String tableName; private String columnName; private SensitivityLabelSource sensitivityLabelSource; SensitivityLabelsImpl(String name, SynapseManager manager) { super(name, new SensitivityLabelInner()); this.manager = manager; this.columnName = name; } SensitivityLabelsImpl(SensitivityLabelInner inner, SynapseManager manager) { super(inner.name(), inner); this.manager = manager; this.columnName = inner.name(); this.resourceGroupName = IdParsingUtils.getValueFromIdByName(inner.id(), "resourceGroups"); this.workspaceName = IdParsingUtils.getValueFromIdByName(inner.id(), "workspaces"); this.sqlPoolName = IdParsingUtils.getValueFromIdByName(inner.id(), "sqlPools"); this.schemaName = IdParsingUtils.getValueFromIdByName(inner.id(), "schemas"); this.tableName = IdParsingUtils.getValueFromIdByName(inner.id(), "tables"); this.columnName = IdParsingUtils.getValueFromIdByName(inner.id(), "columns"); this.sensitivityLabelSource = SensitivityLabelSource.fromString(IdParsingUtils.getValueFromIdByName(inner.id(), "sensitivityLabels")); } @Override public SynapseManager manager() { return this.manager; } @Override public Observable<SensitivityLabels> createResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } @Override public Observable<SensitivityLabels> updateResourceAsync() { SqlPoolSensitivityLabelsInner client = this.manager().inner().sqlPoolSensitivityLabels(); 
return client.createOrUpdateAsync(this.resourceGroupName, this.workspaceName, this.sqlPoolName, this.schemaName, this.tableName, this.columnName, this.inner()) .map(innerToFluentMap(this)); } @Override @Override public boolean isInCreateMode() { return this.inner().id() == null; } @Override public String id() { return this.inner().id(); } @Override public String informationType() { return this.inner().informationType(); } @Override public String informationTypeId() { return this.inner().informationTypeId(); } @Override public Boolean isDisabled() { return this.inner().isDisabled(); } @Override public String labelId() { return this.inner().labelId(); } @Override public String labelName() { return this.inner().labelName(); } @Override public String name() { return this.inner().name(); } @Override public String type() { return this.inner().type(); } @Override public SensitivityLabelsImpl withExistingColumn(String resourceGroupName, String workspaceName, String sqlPoolName, String schemaName, String tableName, String columnName) { this.resourceGroupName = resourceGroupName; this.workspaceName = workspaceName; this.sqlPoolName = sqlPoolName; this.schemaName = schemaName; this.tableName = tableName; this.columnName = columnName; return this; } @Override public SensitivityLabelsImpl withInformationType(String informationType) { this.inner().withInformationType(informationType); return this; } @Override public SensitivityLabelsImpl withInformationTypeId(String informationTypeId) { this.inner().withInformationTypeId(informationTypeId); return this; } @Override public SensitivityLabelsImpl withLabelId(String labelId) { this.inner().withLabelId(labelId); return this; } @Override public SensitivityLabelsImpl withLabelName(String labelName) { this.inner().withLabelName(labelName); return this; } }
```suggestion "http://example.com", "http://portal.azure.com", "http://linkedin.com", "http://8.8.8.8" ```
private static Stream<Arguments> buildWithProxySupplier() { InetSocketAddress proxyAddress = new InetSocketAddress("localhost", 12345); ProxyOptions socks4Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS4, proxyAddress); ProxyOptions socks5Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS5, proxyAddress); ProxyOptions simpleHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated proxies without non-proxy hosts configured. */ arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks4Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks5Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.HTTP, simpleHttpProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ ProxyOptions authenticatedHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress) .setCredentials("1", "1"); arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedHttpProxy, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. */ Supplier<ProxyOptions> nonProxyHostsSupplier = () -> new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress).setNonProxyHosts(rawNonProxyHosts); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } /* * HTTP proxies with authentication and non-proxy hosts configured. 
*/ Supplier<ProxyOptions> authenticatedNonProxyHostsSupplier = () -> nonProxyHostsSupplier.get() .setCredentials("1", "1"); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } return arguments.stream(); }
"http:
private static Stream<Arguments> buildWithProxySupplier() { InetSocketAddress proxyAddress = new InetSocketAddress("localhost", 12345); ProxyOptions socks4Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS4, proxyAddress); ProxyOptions socks5Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS5, proxyAddress); ProxyOptions simpleHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated proxies without non-proxy hosts configured. */ arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks4Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks5Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.HTTP, simpleHttpProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ ProxyOptions authenticatedHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress) .setCredentials("1", "1"); arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedHttpProxy, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. */ Supplier<ProxyOptions> nonProxyHostsSupplier = () -> new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress).setNonProxyHosts(rawNonProxyHosts); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } /* * HTTP proxies with authentication and non-proxy hosts configured. 
*/ Supplier<ProxyOptions> authenticatedNonProxyHostsSupplier = () -> nonProxyHostsSupplier.get() .setCredentials("1", "1"); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } return arguments.stream(); }
class OkHttpAsyncHttpClientBuilderTests { private static final String COOKIE_VALIDATOR_PATH = "/cookieValidator"; private static final String DEFAULT_PATH = "/default"; private static final String DISPATCHER_PATH = "/dispatcher"; private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String JAVA_HTTP_PROXY_HOST = "http.proxyHost"; private static final String JAVA_HTTP_PROXY_PORT = "http.proxyPort"; private static final String JAVA_HTTP_PROXY_USER = "http.proxyUser"; private static final String JAVA_HTTP_PROXY_PASSWORD = "http.proxyPassword"; private static WireMockServer server; private static String cookieValidatorUrl; private static String defaultUrl; private static String dispatcherUrl; @BeforeAll public static void setupWireMock() { server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(COOKIE_VALIDATOR_PATH).withCookie("test", WireMock.matching("success")) .willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DEFAULT_PATH).willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DISPATCHER_PATH).willReturn(WireMock.aResponse().withStatus(200) .withFixedDelay(5000))); server.start(); cookieValidatorUrl = "http: defaultUrl = "http: dispatcherUrl = "http: } @AfterAll public static void shutdownWireMock() { if (server.isRunning()) { server.shutdown(); } } /** * Tests that an {@link OkHttpAsyncHttpClient} is able to be built from an existing {@link OkHttpClient}. 
*/ @Test public void buildClientWithExistingClient() { OkHttpClient existingClient = new OkHttpClient.Builder() .addInterceptor(chain -> chain .proceed(chain.request().newBuilder().addHeader("Cookie", "test=success").build())) .build(); HttpClient client = new OkHttpAsyncHttpClientBuilder(existingClient).build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that instantiating an {@link OkHttpAsyncHttpClientBuilder} with a {@code null} {@link OkHttpClient} will * throw a {@link NullPointerException}. */ @Test public void startingWithNullClientThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder(null)); } /** * Tests that adding an {@link Interceptor} is handled correctly. */ @Test public void addNetworkInterceptor() { Interceptor testInterceptor = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(testInterceptor) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that adding a {@code null} {@link Interceptor} will throw a {@link NullPointerException}. */ @Test public void nullNetworkInterceptorThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().addNetworkInterceptor(null)); } /** * Tests that the {@link Interceptor interceptors} in the client are replace-able by setting a new list of * interceptors. 
*/ @Test public void setNetworkInterceptors() { Interceptor badCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=failure").build()); Interceptor goodCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(badCookieSetter) .networkInterceptors(Collections.singletonList(goodCookieSetter)) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that setting the {@link Interceptor interceptors} to {@code null} will throw a {@link * NullPointerException}. */ @Test public void nullNetworkInterceptorsThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().networkInterceptors(null)); } /** * Tests building a client with a given {@code connectionTimeout}. */ @Test public void buildWithConnectionTimeout() { int expectedConnectionTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedConnectionTimeoutMillis, chain.connectTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .connectionTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionTimeout}. 
*/ @Test public void buildWithReadTimeout() { int expectedReadTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedReadTimeoutMillis, chain.readTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .readTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionPool}. */ @Test public void buildWithConnectionPool() { ConnectionPool connectionPool = new ConnectionPool(); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .connectionPool(connectionPool) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); assertEquals(1, connectionPool.connectionCount()); } /** * Tests that passing a {@code null} {@code connectionPool} to the builder will throw a {@link * NullPointerException}. */ @Test public void nullConnectionPoolThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().connectionPool(null)); } /** * Tests building a client with a given {@code dispatcher}. */ @Test public void buildWithDispatcher() { String expectedThreadName = "testDispatcher"; Dispatcher dispatcher = new Dispatcher(Executors .newFixedThreadPool(1, (Runnable r) -> new Thread(r, expectedThreadName))); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .dispatcher(dispatcher) .build(); /* * Schedule a task that will run in one second to cancel all requests sent using the dispatcher. This should * result in the request we are about to send to be cancelled since WireMock will wait 5 seconds before * returning a response. 
*/ new Timer().schedule(new TimerTask() { @Override public void run() { assertEquals(1, dispatcher.runningCallsCount()); dispatcher.cancelAll(); } }, 1000); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, dispatcherUrl))) .verifyError(); } /** * Tests that passing a {@code null} {@code dispatcher} to the builder will throw a {@link NullPointerException}. */ @Test public void nullDispatcherThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().dispatcher(null)); } /** * Tests that building a client with a proxy will send the request through the proxy server. */ @ParameterizedTest @MethodSource("buildWithProxySupplier") public void buildWithProxy(boolean shouldHaveProxy, Proxy.Type proxyType, ProxyOptions proxyOptions, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, proxyType); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .proxy(proxyOptions) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } @Test public void buildWithConfigurationNone() { HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .configuration(Configuration.NONE) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @ParameterizedTest @MethodSource("buildWithConfigurationProxySupplier") public void buildWithConfigurationProxy(boolean shouldHaveProxy, Configuration configuration, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, Proxy.Type.HTTP); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .configuration(configuration) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, 
requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } private static Stream<Arguments> buildWithConfigurationProxySupplier() { Supplier<Configuration> baseJavaProxyConfigurationSupplier = () -> new Configuration() .put(JAVA_PROXY_PREREQUISITE, "true") .put(JAVA_HTTP_PROXY_HOST, "localhost") .put(JAVA_HTTP_PROXY_PORT, "12345"); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated HTTP proxies. */ arguments.add(Arguments.of(true, baseJavaProxyConfigurationSupplier.get(), defaultUrl)); Configuration simpleEnvProxy = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, simpleEnvProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ Configuration javaProxyWithAuthentication = baseJavaProxyConfigurationSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); arguments.add(Arguments.of(true, javaProxyWithAuthentication, defaultUrl)); Configuration envProxyWithAuthentication = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, envProxyWithAuthentication, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawJavaNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String rawEnvNonProxyHosts = String.join(",", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. 
*/ Supplier<Configuration> javaNonProxyHostsSupplier = () -> baseJavaProxyConfigurationSupplier.get() .put(JAVA_NON_PROXY_HOSTS, rawJavaNonProxyHosts); Supplier<Configuration> envNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> nonProxyHostsSuppliers = Arrays.asList(javaNonProxyHostsSupplier, envNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : nonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } /* * HTTP proxies with authentication and non-proxy hosts configured. */ Supplier<Configuration> authenticatedJavaNonProxyHostsSupplier = () -> javaNonProxyHostsSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); Supplier<Configuration> authenticatedEnvNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> authenticatedNonProxyHostsSuppliers = Arrays.asList( authenticatedJavaNonProxyHostsSupplier, authenticatedEnvNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : authenticatedNonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } return arguments.stream(); } private static OkHttpClient okHttpClientWithProxyValidation(boolean shouldHaveProxy, Proxy.Type proxyType) { return new OkHttpClient.Builder() .eventListener(new 
TestEventListenerValidator(shouldHaveProxy, proxyType)) .build(); } private static final class TestEventListenerValidator extends EventListener { private static final String EXPECTED_EXCEPTION_MESSAGE = "This is a local test so we cannot connect to remote " + "hosts eagerly. This is exception is expected."; private static final RuntimeException EXPECTED_EXCEPTION = new RuntimeException(EXPECTED_EXCEPTION_MESSAGE); private final boolean shouldHaveProxy; private final Proxy.Type proxyType; private TestEventListenerValidator(boolean shouldHaveProxy, Proxy.Type proxyType) { this.shouldHaveProxy = shouldHaveProxy; this.proxyType = proxyType; } @Override public void connectStart(Call call, InetSocketAddress inetSocketAddress, Proxy proxy) { RuntimeException exception = EXPECTED_EXCEPTION; try { if (shouldHaveProxy) { assertNotNull(proxy.address()); assertEquals(proxyType, proxy.type()); } else { assertEquals(Proxy.NO_PROXY, proxy); } } catch (Throwable throwable) { exception = new RuntimeException(throwable); } throw exception; } } }
class OkHttpAsyncHttpClientBuilderTests { private static final String COOKIE_VALIDATOR_PATH = "/cookieValidator"; private static final String DEFAULT_PATH = "/default"; private static final String DISPATCHER_PATH = "/dispatcher"; private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String JAVA_HTTP_PROXY_HOST = "http.proxyHost"; private static final String JAVA_HTTP_PROXY_PORT = "http.proxyPort"; private static final String JAVA_HTTP_PROXY_USER = "http.proxyUser"; private static final String JAVA_HTTP_PROXY_PASSWORD = "http.proxyPassword"; private static WireMockServer server; private static String cookieValidatorUrl; private static String defaultUrl; private static String dispatcherUrl; @BeforeAll public static void setupWireMock() { server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(COOKIE_VALIDATOR_PATH).withCookie("test", WireMock.matching("success")) .willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DEFAULT_PATH).willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DISPATCHER_PATH).willReturn(WireMock.aResponse().withStatus(200) .withFixedDelay(5000))); server.start(); cookieValidatorUrl = "http: defaultUrl = "http: dispatcherUrl = "http: } @AfterAll public static void shutdownWireMock() { if (server.isRunning()) { server.shutdown(); } } /** * Tests that an {@link OkHttpAsyncHttpClient} is able to be built from an existing {@link OkHttpClient}. 
*/ @Test public void buildClientWithExistingClient() { OkHttpClient existingClient = new OkHttpClient.Builder() .addInterceptor(chain -> chain .proceed(chain.request().newBuilder().addHeader("Cookie", "test=success").build())) .build(); HttpClient client = new OkHttpAsyncHttpClientBuilder(existingClient).build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that instantiating an {@link OkHttpAsyncHttpClientBuilder} with a {@code null} {@link OkHttpClient} will * throw a {@link NullPointerException}. */ @Test public void startingWithNullClientThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder(null)); } /** * Tests that adding an {@link Interceptor} is handled correctly. */ @Test public void addNetworkInterceptor() { Interceptor testInterceptor = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(testInterceptor) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that adding a {@code null} {@link Interceptor} will throw a {@link NullPointerException}. */ @Test public void nullNetworkInterceptorThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().addNetworkInterceptor(null)); } /** * Tests that the {@link Interceptor interceptors} in the client are replace-able by setting a new list of * interceptors. 
*/ @Test public void setNetworkInterceptors() { Interceptor badCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=failure").build()); Interceptor goodCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(badCookieSetter) .networkInterceptors(Collections.singletonList(goodCookieSetter)) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that setting the {@link Interceptor interceptors} to {@code null} will throw a {@link * NullPointerException}. */ @Test public void nullNetworkInterceptorsThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().networkInterceptors(null)); } /** * Tests building a client with a given {@code connectionTimeout}. */ @Test public void buildWithConnectionTimeout() { int expectedConnectionTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedConnectionTimeoutMillis, chain.connectTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .connectionTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionTimeout}. 
*/ @Test public void buildWithReadTimeout() { int expectedReadTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedReadTimeoutMillis, chain.readTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .readTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionPool}. */ @Test public void buildWithConnectionPool() { ConnectionPool connectionPool = new ConnectionPool(); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .connectionPool(connectionPool) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); assertEquals(1, connectionPool.connectionCount()); } /** * Tests that passing a {@code null} {@code connectionPool} to the builder will throw a {@link * NullPointerException}. */ @Test public void nullConnectionPoolThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().connectionPool(null)); } /** * Tests building a client with a given {@code dispatcher}. */ @Test public void buildWithDispatcher() { String expectedThreadName = "testDispatcher"; Dispatcher dispatcher = new Dispatcher(Executors .newFixedThreadPool(1, (Runnable r) -> new Thread(r, expectedThreadName))); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .dispatcher(dispatcher) .build(); /* * Schedule a task that will run in one second to cancel all requests sent using the dispatcher. This should * result in the request we are about to send to be cancelled since WireMock will wait 5 seconds before * returning a response. 
*/ new Timer().schedule(new TimerTask() { @Override public void run() { assertEquals(1, dispatcher.runningCallsCount()); dispatcher.cancelAll(); } }, 1000); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, dispatcherUrl))) .verifyError(); } /** * Tests that passing a {@code null} {@code dispatcher} to the builder will throw a {@link NullPointerException}. */ @Test public void nullDispatcherThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().dispatcher(null)); } /** * Tests that building a client with a proxy will send the request through the proxy server. */ @ParameterizedTest @MethodSource("buildWithProxySupplier") public void buildWithProxy(boolean shouldHaveProxy, Proxy.Type proxyType, ProxyOptions proxyOptions, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, proxyType); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .proxy(proxyOptions) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } @Test public void buildWithConfigurationNone() { HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .configuration(Configuration.NONE) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @ParameterizedTest @MethodSource("buildWithConfigurationProxySupplier") public void buildWithConfigurationProxy(boolean shouldHaveProxy, Configuration configuration, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, Proxy.Type.HTTP); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .configuration(configuration) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, 
requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } private static Stream<Arguments> buildWithConfigurationProxySupplier() { Supplier<Configuration> baseJavaProxyConfigurationSupplier = () -> new Configuration() .put(JAVA_PROXY_PREREQUISITE, "true") .put(JAVA_HTTP_PROXY_HOST, "localhost") .put(JAVA_HTTP_PROXY_PORT, "12345"); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated HTTP proxies. */ arguments.add(Arguments.of(true, baseJavaProxyConfigurationSupplier.get(), defaultUrl)); Configuration simpleEnvProxy = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, simpleEnvProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ Configuration javaProxyWithAuthentication = baseJavaProxyConfigurationSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); arguments.add(Arguments.of(true, javaProxyWithAuthentication, defaultUrl)); Configuration envProxyWithAuthentication = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, envProxyWithAuthentication, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawJavaNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String rawEnvNonProxyHosts = String.join(",", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. 
*/ Supplier<Configuration> javaNonProxyHostsSupplier = () -> baseJavaProxyConfigurationSupplier.get() .put(JAVA_NON_PROXY_HOSTS, rawJavaNonProxyHosts); Supplier<Configuration> envNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> nonProxyHostsSuppliers = Arrays.asList(javaNonProxyHostsSupplier, envNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : nonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } /* * HTTP proxies with authentication and non-proxy hosts configured. */ Supplier<Configuration> authenticatedJavaNonProxyHostsSupplier = () -> javaNonProxyHostsSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); Supplier<Configuration> authenticatedEnvNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> authenticatedNonProxyHostsSuppliers = Arrays.asList( authenticatedJavaNonProxyHostsSupplier, authenticatedEnvNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : authenticatedNonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } return arguments.stream(); } private static OkHttpClient okHttpClientWithProxyValidation(boolean shouldHaveProxy, Proxy.Type proxyType) { return new OkHttpClient.Builder() .eventListener(new 
TestEventListenerValidator(shouldHaveProxy, proxyType)) .build(); } private static final class TestEventListenerValidator extends EventListener { private static final String EXPECTED_EXCEPTION_MESSAGE = "This is a local test so we cannot connect to remote " + "hosts eagerly. This is exception is expected."; private static final RuntimeException EXPECTED_EXCEPTION = new RuntimeException(EXPECTED_EXCEPTION_MESSAGE); private final boolean shouldHaveProxy; private final Proxy.Type proxyType; private TestEventListenerValidator(boolean shouldHaveProxy, Proxy.Type proxyType) { this.shouldHaveProxy = shouldHaveProxy; this.proxyType = proxyType; } @Override public void connectStart(Call call, InetSocketAddress inetSocketAddress, Proxy proxy) { RuntimeException exception = EXPECTED_EXCEPTION; try { if (shouldHaveProxy) { assertNotNull(proxy.address()); assertEquals(proxyType, proxy.type()); } else { assertEquals(Proxy.NO_PROXY, proxy); } } catch (Throwable throwable) { exception = new RuntimeException(throwable); } throw exception; } } }
Given that we are now using `find()` instead of `matches()`, can "*.microsoft.com" just be ".microsoft.com"? The `*` prefix may not be necessary now if that was the intention to switch to `find()`.
private static Stream<Arguments> buildWithProxySupplier() { InetSocketAddress proxyAddress = new InetSocketAddress("localhost", 12345); ProxyOptions socks4Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS4, proxyAddress); ProxyOptions socks5Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS5, proxyAddress); ProxyOptions simpleHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated proxies without non-proxy hosts configured. */ arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks4Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks5Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.HTTP, simpleHttpProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ ProxyOptions authenticatedHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress) .setCredentials("1", "1"); arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedHttpProxy, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. */ Supplier<ProxyOptions> nonProxyHostsSupplier = () -> new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress).setNonProxyHosts(rawNonProxyHosts); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } /* * HTTP proxies with authentication and non-proxy hosts configured. 
*/ Supplier<ProxyOptions> authenticatedNonProxyHostsSupplier = () -> nonProxyHostsSupplier.get() .setCredentials("1", "1"); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } return arguments.stream(); }
String rawNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com");
private static Stream<Arguments> buildWithProxySupplier() { InetSocketAddress proxyAddress = new InetSocketAddress("localhost", 12345); ProxyOptions socks4Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS4, proxyAddress); ProxyOptions socks5Proxy = new ProxyOptions(ProxyOptions.Type.SOCKS5, proxyAddress); ProxyOptions simpleHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated proxies without non-proxy hosts configured. */ arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks4Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.SOCKS, socks5Proxy, defaultUrl)); arguments.add(Arguments.of(true, Proxy.Type.HTTP, simpleHttpProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ ProxyOptions authenticatedHttpProxy = new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress) .setCredentials("1", "1"); arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedHttpProxy, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. */ Supplier<ProxyOptions> nonProxyHostsSupplier = () -> new ProxyOptions(ProxyOptions.Type.HTTP, proxyAddress).setNonProxyHosts(rawNonProxyHosts); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, nonProxyHostsSupplier.get(), requestUrl)); } /* * HTTP proxies with authentication and non-proxy hosts configured. 
*/ Supplier<ProxyOptions> authenticatedNonProxyHostsSupplier = () -> nonProxyHostsSupplier.get() .setCredentials("1", "1"); for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, Proxy.Type.HTTP, authenticatedNonProxyHostsSupplier.get(), requestUrl)); } return arguments.stream(); }
class OkHttpAsyncHttpClientBuilderTests { private static final String COOKIE_VALIDATOR_PATH = "/cookieValidator"; private static final String DEFAULT_PATH = "/default"; private static final String DISPATCHER_PATH = "/dispatcher"; private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String JAVA_HTTP_PROXY_HOST = "http.proxyHost"; private static final String JAVA_HTTP_PROXY_PORT = "http.proxyPort"; private static final String JAVA_HTTP_PROXY_USER = "http.proxyUser"; private static final String JAVA_HTTP_PROXY_PASSWORD = "http.proxyPassword"; private static WireMockServer server; private static String cookieValidatorUrl; private static String defaultUrl; private static String dispatcherUrl; @BeforeAll public static void setupWireMock() { server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(COOKIE_VALIDATOR_PATH).withCookie("test", WireMock.matching("success")) .willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DEFAULT_PATH).willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DISPATCHER_PATH).willReturn(WireMock.aResponse().withStatus(200) .withFixedDelay(5000))); server.start(); cookieValidatorUrl = "http: defaultUrl = "http: dispatcherUrl = "http: } @AfterAll public static void shutdownWireMock() { if (server.isRunning()) { server.shutdown(); } } /** * Tests that an {@link OkHttpAsyncHttpClient} is able to be built from an existing {@link OkHttpClient}. 
*/ @Test public void buildClientWithExistingClient() { OkHttpClient existingClient = new OkHttpClient.Builder() .addInterceptor(chain -> chain .proceed(chain.request().newBuilder().addHeader("Cookie", "test=success").build())) .build(); HttpClient client = new OkHttpAsyncHttpClientBuilder(existingClient).build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that instantiating an {@link OkHttpAsyncHttpClientBuilder} with a {@code null} {@link OkHttpClient} will * throw a {@link NullPointerException}. */ @Test public void startingWithNullClientThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder(null)); } /** * Tests that adding an {@link Interceptor} is handled correctly. */ @Test public void addNetworkInterceptor() { Interceptor testInterceptor = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(testInterceptor) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that adding a {@code null} {@link Interceptor} will throw a {@link NullPointerException}. */ @Test public void nullNetworkInterceptorThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().addNetworkInterceptor(null)); } /** * Tests that the {@link Interceptor interceptors} in the client are replace-able by setting a new list of * interceptors. 
*/ @Test public void setNetworkInterceptors() { Interceptor badCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=failure").build()); Interceptor goodCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(badCookieSetter) .networkInterceptors(Collections.singletonList(goodCookieSetter)) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that setting the {@link Interceptor interceptors} to {@code null} will throw a {@link * NullPointerException}. */ @Test public void nullNetworkInterceptorsThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().networkInterceptors(null)); } /** * Tests building a client with a given {@code connectionTimeout}. */ @Test public void buildWithConnectionTimeout() { int expectedConnectionTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedConnectionTimeoutMillis, chain.connectTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .connectionTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionTimeout}. 
*/ @Test public void buildWithReadTimeout() { int expectedReadTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedReadTimeoutMillis, chain.readTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .readTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionPool}. */ @Test public void buildWithConnectionPool() { ConnectionPool connectionPool = new ConnectionPool(); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .connectionPool(connectionPool) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); assertEquals(1, connectionPool.connectionCount()); } /** * Tests that passing a {@code null} {@code connectionPool} to the builder will throw a {@link * NullPointerException}. */ @Test public void nullConnectionPoolThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().connectionPool(null)); } /** * Tests building a client with a given {@code dispatcher}. */ @Test public void buildWithDispatcher() { String expectedThreadName = "testDispatcher"; Dispatcher dispatcher = new Dispatcher(Executors .newFixedThreadPool(1, (Runnable r) -> new Thread(r, expectedThreadName))); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .dispatcher(dispatcher) .build(); /* * Schedule a task that will run in one second to cancel all requests sent using the dispatcher. This should * result in the request we are about to send to be cancelled since WireMock will wait 5 seconds before * returning a response. 
*/ new Timer().schedule(new TimerTask() { @Override public void run() { assertEquals(1, dispatcher.runningCallsCount()); dispatcher.cancelAll(); } }, 1000); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, dispatcherUrl))) .verifyError(); } /** * Tests that passing a {@code null} {@code dispatcher} to the builder will throw a {@link NullPointerException}. */ @Test public void nullDispatcherThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().dispatcher(null)); } /** * Tests that building a client with a proxy will send the request through the proxy server. */ @ParameterizedTest @MethodSource("buildWithProxySupplier") public void buildWithProxy(boolean shouldHaveProxy, Proxy.Type proxyType, ProxyOptions proxyOptions, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, proxyType); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .proxy(proxyOptions) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } @Test public void buildWithConfigurationNone() { HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .configuration(Configuration.NONE) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @ParameterizedTest @MethodSource("buildWithConfigurationProxySupplier") public void buildWithConfigurationProxy(boolean shouldHaveProxy, Configuration configuration, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, Proxy.Type.HTTP); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .configuration(configuration) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, 
requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } private static Stream<Arguments> buildWithConfigurationProxySupplier() { Supplier<Configuration> baseJavaProxyConfigurationSupplier = () -> new Configuration() .put(JAVA_PROXY_PREREQUISITE, "true") .put(JAVA_HTTP_PROXY_HOST, "localhost") .put(JAVA_HTTP_PROXY_PORT, "12345"); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated HTTP proxies. */ arguments.add(Arguments.of(true, baseJavaProxyConfigurationSupplier.get(), defaultUrl)); Configuration simpleEnvProxy = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, simpleEnvProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ Configuration javaProxyWithAuthentication = baseJavaProxyConfigurationSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); arguments.add(Arguments.of(true, javaProxyWithAuthentication, defaultUrl)); Configuration envProxyWithAuthentication = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, envProxyWithAuthentication, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawJavaNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String rawEnvNonProxyHosts = String.join(",", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. 
*/ Supplier<Configuration> javaNonProxyHostsSupplier = () -> baseJavaProxyConfigurationSupplier.get() .put(JAVA_NON_PROXY_HOSTS, rawJavaNonProxyHosts); Supplier<Configuration> envNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> nonProxyHostsSuppliers = Arrays.asList(javaNonProxyHostsSupplier, envNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : nonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } /* * HTTP proxies with authentication and non-proxy hosts configured. */ Supplier<Configuration> authenticatedJavaNonProxyHostsSupplier = () -> javaNonProxyHostsSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); Supplier<Configuration> authenticatedEnvNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> authenticatedNonProxyHostsSuppliers = Arrays.asList( authenticatedJavaNonProxyHostsSupplier, authenticatedEnvNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : authenticatedNonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } return arguments.stream(); } private static OkHttpClient okHttpClientWithProxyValidation(boolean shouldHaveProxy, Proxy.Type proxyType) { return new OkHttpClient.Builder() .eventListener(new 
TestEventListenerValidator(shouldHaveProxy, proxyType)) .build(); } private static final class TestEventListenerValidator extends EventListener { private static final String EXPECTED_EXCEPTION_MESSAGE = "This is a local test so we cannot connect to remote " + "hosts eagerly. This is exception is expected."; private static final RuntimeException EXPECTED_EXCEPTION = new RuntimeException(EXPECTED_EXCEPTION_MESSAGE); private final boolean shouldHaveProxy; private final Proxy.Type proxyType; private TestEventListenerValidator(boolean shouldHaveProxy, Proxy.Type proxyType) { this.shouldHaveProxy = shouldHaveProxy; this.proxyType = proxyType; } @Override public void connectStart(Call call, InetSocketAddress inetSocketAddress, Proxy proxy) { RuntimeException exception = EXPECTED_EXCEPTION; try { if (shouldHaveProxy) { assertNotNull(proxy.address()); assertEquals(proxyType, proxy.type()); } else { assertEquals(Proxy.NO_PROXY, proxy); } } catch (Throwable throwable) { exception = new RuntimeException(throwable); } throw exception; } } }
class OkHttpAsyncHttpClientBuilderTests { private static final String COOKIE_VALIDATOR_PATH = "/cookieValidator"; private static final String DEFAULT_PATH = "/default"; private static final String DISPATCHER_PATH = "/dispatcher"; private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String JAVA_HTTP_PROXY_HOST = "http.proxyHost"; private static final String JAVA_HTTP_PROXY_PORT = "http.proxyPort"; private static final String JAVA_HTTP_PROXY_USER = "http.proxyUser"; private static final String JAVA_HTTP_PROXY_PASSWORD = "http.proxyPassword"; private static WireMockServer server; private static String cookieValidatorUrl; private static String defaultUrl; private static String dispatcherUrl; @BeforeAll public static void setupWireMock() { server = new WireMockServer(WireMockConfiguration.options().dynamicPort().disableRequestJournal()); server.stubFor(WireMock.get(COOKIE_VALIDATOR_PATH).withCookie("test", WireMock.matching("success")) .willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DEFAULT_PATH).willReturn(WireMock.aResponse().withStatus(200))); server.stubFor(WireMock.get(DISPATCHER_PATH).willReturn(WireMock.aResponse().withStatus(200) .withFixedDelay(5000))); server.start(); cookieValidatorUrl = "http: defaultUrl = "http: dispatcherUrl = "http: } @AfterAll public static void shutdownWireMock() { if (server.isRunning()) { server.shutdown(); } } /** * Tests that an {@link OkHttpAsyncHttpClient} is able to be built from an existing {@link OkHttpClient}. 
*/ @Test public void buildClientWithExistingClient() { OkHttpClient existingClient = new OkHttpClient.Builder() .addInterceptor(chain -> chain .proceed(chain.request().newBuilder().addHeader("Cookie", "test=success").build())) .build(); HttpClient client = new OkHttpAsyncHttpClientBuilder(existingClient).build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that instantiating an {@link OkHttpAsyncHttpClientBuilder} with a {@code null} {@link OkHttpClient} will * throw a {@link NullPointerException}. */ @Test public void startingWithNullClientThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder(null)); } /** * Tests that adding an {@link Interceptor} is handled correctly. */ @Test public void addNetworkInterceptor() { Interceptor testInterceptor = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(testInterceptor) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that adding a {@code null} {@link Interceptor} will throw a {@link NullPointerException}. */ @Test public void nullNetworkInterceptorThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().addNetworkInterceptor(null)); } /** * Tests that the {@link Interceptor interceptors} in the client are replace-able by setting a new list of * interceptors. 
*/ @Test public void setNetworkInterceptors() { Interceptor badCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=failure").build()); Interceptor goodCookieSetter = chain -> chain.proceed(chain.request().newBuilder() .addHeader("Cookie", "test=success").build()); HttpClient client = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(badCookieSetter) .networkInterceptors(Collections.singletonList(goodCookieSetter)) .build(); StepVerifier.create(client.send(new HttpRequest(HttpMethod.GET, cookieValidatorUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests that setting the {@link Interceptor interceptors} to {@code null} will throw a {@link * NullPointerException}. */ @Test public void nullNetworkInterceptorsThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().networkInterceptors(null)); } /** * Tests building a client with a given {@code connectionTimeout}. */ @Test public void buildWithConnectionTimeout() { int expectedConnectionTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedConnectionTimeoutMillis, chain.connectTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .connectionTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionTimeout}. 
*/ @Test public void buildWithReadTimeout() { int expectedReadTimeoutMillis = 3600 * 1000; Interceptor validatorInterceptor = chain -> { assertEquals(expectedReadTimeoutMillis, chain.readTimeoutMillis()); return chain.proceed(chain.request()); }; HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .addNetworkInterceptor(validatorInterceptor) .readTimeout(Duration.ofSeconds(3600)) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } /** * Tests building a client with a given {@code connectionPool}. */ @Test public void buildWithConnectionPool() { ConnectionPool connectionPool = new ConnectionPool(); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .connectionPool(connectionPool) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); assertEquals(1, connectionPool.connectionCount()); } /** * Tests that passing a {@code null} {@code connectionPool} to the builder will throw a {@link * NullPointerException}. */ @Test public void nullConnectionPoolThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().connectionPool(null)); } /** * Tests building a client with a given {@code dispatcher}. */ @Test public void buildWithDispatcher() { String expectedThreadName = "testDispatcher"; Dispatcher dispatcher = new Dispatcher(Executors .newFixedThreadPool(1, (Runnable r) -> new Thread(r, expectedThreadName))); HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .dispatcher(dispatcher) .build(); /* * Schedule a task that will run in one second to cancel all requests sent using the dispatcher. This should * result in the request we are about to send to be cancelled since WireMock will wait 5 seconds before * returning a response. 
*/ new Timer().schedule(new TimerTask() { @Override public void run() { assertEquals(1, dispatcher.runningCallsCount()); dispatcher.cancelAll(); } }, 1000); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, dispatcherUrl))) .verifyError(); } /** * Tests that passing a {@code null} {@code dispatcher} to the builder will throw a {@link NullPointerException}. */ @Test public void nullDispatcherThrows() { assertThrows(NullPointerException.class, () -> new OkHttpAsyncHttpClientBuilder().dispatcher(null)); } /** * Tests that building a client with a proxy will send the request through the proxy server. */ @ParameterizedTest @MethodSource("buildWithProxySupplier") public void buildWithProxy(boolean shouldHaveProxy, Proxy.Type proxyType, ProxyOptions proxyOptions, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, proxyType); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .proxy(proxyOptions) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } @Test public void buildWithConfigurationNone() { HttpClient okClient = new OkHttpAsyncHttpClientBuilder() .configuration(Configuration.NONE) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, defaultUrl))) .assertNext(response -> assertEquals(200, response.getStatusCode())) .verifyComplete(); } @ParameterizedTest @MethodSource("buildWithConfigurationProxySupplier") public void buildWithConfigurationProxy(boolean shouldHaveProxy, Configuration configuration, String requestUrl) { OkHttpClient validatorClient = okHttpClientWithProxyValidation(shouldHaveProxy, Proxy.Type.HTTP); HttpClient okClient = new OkHttpAsyncHttpClientBuilder(validatorClient) .configuration(configuration) .build(); StepVerifier.create(okClient.send(new HttpRequest(HttpMethod.GET, 
requestUrl))) .verifyErrorMatches(throwable -> throwable.getMessage() .contains(TestEventListenerValidator.EXPECTED_EXCEPTION_MESSAGE)); } private static Stream<Arguments> buildWithConfigurationProxySupplier() { Supplier<Configuration> baseJavaProxyConfigurationSupplier = () -> new Configuration() .put(JAVA_PROXY_PREREQUISITE, "true") .put(JAVA_HTTP_PROXY_HOST, "localhost") .put(JAVA_HTTP_PROXY_PORT, "12345"); List<Arguments> arguments = new ArrayList<>(); /* * Simple non-authenticated HTTP proxies. */ arguments.add(Arguments.of(true, baseJavaProxyConfigurationSupplier.get(), defaultUrl)); Configuration simpleEnvProxy = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, simpleEnvProxy, defaultUrl)); /* * HTTP proxy with authentication configured. */ Configuration javaProxyWithAuthentication = baseJavaProxyConfigurationSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); arguments.add(Arguments.of(true, javaProxyWithAuthentication, defaultUrl)); Configuration envProxyWithAuthentication = new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: arguments.add(Arguments.of(true, envProxyWithAuthentication, defaultUrl)); /* * Information for non-proxy hosts testing. */ String rawJavaNonProxyHosts = String.join("|", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String rawEnvNonProxyHosts = String.join(",", "localhost", "127.0.0.1", "*.microsoft.com", "*.linkedin.com"); String[] requestUrlsWithoutProxying = new String[]{ "http: }; String[] requestUrlsWithProxying = new String[]{ "http: }; /* * HTTP proxies with non-proxy hosts configured. 
*/ Supplier<Configuration> javaNonProxyHostsSupplier = () -> baseJavaProxyConfigurationSupplier.get() .put(JAVA_NON_PROXY_HOSTS, rawJavaNonProxyHosts); Supplier<Configuration> envNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> nonProxyHostsSuppliers = Arrays.asList(javaNonProxyHostsSupplier, envNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : nonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } /* * HTTP proxies with authentication and non-proxy hosts configured. */ Supplier<Configuration> authenticatedJavaNonProxyHostsSupplier = () -> javaNonProxyHostsSupplier.get() .put(JAVA_HTTP_PROXY_USER, "1") .put(JAVA_HTTP_PROXY_PASSWORD, "1"); Supplier<Configuration> authenticatedEnvNonProxyHostsSupplier = () -> new Configuration() .put(Configuration.PROPERTY_HTTP_PROXY, "http: .put(Configuration.PROPERTY_NO_PROXY, rawEnvNonProxyHosts); List<Supplier<Configuration>> authenticatedNonProxyHostsSuppliers = Arrays.asList( authenticatedJavaNonProxyHostsSupplier, authenticatedEnvNonProxyHostsSupplier); for (Supplier<Configuration> configurationSupplier : authenticatedNonProxyHostsSuppliers) { for (String requestUrl : requestUrlsWithoutProxying) { arguments.add(Arguments.of(false, configurationSupplier.get(), requestUrl)); } for (String requestUrl : requestUrlsWithProxying) { arguments.add(Arguments.of(true, configurationSupplier.get(), requestUrl)); } } return arguments.stream(); } private static OkHttpClient okHttpClientWithProxyValidation(boolean shouldHaveProxy, Proxy.Type proxyType) { return new OkHttpClient.Builder() .eventListener(new 
TestEventListenerValidator(shouldHaveProxy, proxyType)) .build(); } private static final class TestEventListenerValidator extends EventListener { private static final String EXPECTED_EXCEPTION_MESSAGE = "This is a local test so we cannot connect to remote " + "hosts eagerly. This is exception is expected."; private static final RuntimeException EXPECTED_EXCEPTION = new RuntimeException(EXPECTED_EXCEPTION_MESSAGE); private final boolean shouldHaveProxy; private final Proxy.Type proxyType; private TestEventListenerValidator(boolean shouldHaveProxy, Proxy.Type proxyType) { this.shouldHaveProxy = shouldHaveProxy; this.proxyType = proxyType; } @Override public void connectStart(Call call, InetSocketAddress inetSocketAddress, Proxy proxy) { RuntimeException exception = EXPECTED_EXCEPTION; try { if (shouldHaveProxy) { assertNotNull(proxy.address()); assertEquals(proxyType, proxy.type()); } else { assertEquals(Proxy.NO_PROXY, proxy); } } catch (Throwable throwable) { exception = new RuntimeException(throwable); } throw exception; } } }
The names of both these methods are very similar - sanitizeNoProxy and sanitizeNonProxyHosts and they almost do the same thing. It might be worth combining them or having a more descriptive method name.
private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ return sanitizeNonProxyHosts(noProxyString.split(",")); }
}
private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ String[] nonProxyHosts = noProxyString.split(","); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'NO_PROXY' doesn't have a strongly standardized format, for now we are going to support values beginning * and ending with '*' or '.' to exclude an entire domain and will quote the value between the prefix and * suffix. In the future this may need to be updated to support more complex scenarios required by * 'NO_PROXY' users such as wild cards within the domain exclusion. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; /* * First check if the non-proxy host begins with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.startsWith(".*")) { prefixWildcard = ".*"; body = body.substring(2); } else if (body.startsWith("*") || body.startsWith(".")) { prefixWildcard = ".*"; body = body.substring(1); } /* * First check if the non-proxy host ends with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.endsWith(".*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 2); } else if (body.endsWith("*") || body.endsWith(".")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether Java environment proxy configurations are allowed to be used. */ private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private static final List<Function<Configuration, ProxyOptions>> ENVIRONMENT_LOAD_ORDER = Arrays.asList( configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTPS_PROXY), configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTP_PROXY), configuration -> attemptToLoadJavaProxy(configuration, HTTPS), configuration -> attemptToLoadJavaProxy(configuration, HTTP) ); private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. 
* * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. * * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. 
* * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; for (Function<Configuration, ProxyOptions> loader : ENVIRONMENT_LOAD_ORDER) { ProxyOptions proxyOptions = loader.apply(proxyConfiguration); if (proxyOptions != null) { return proxyOptions; } } return null; } private static ProxyOptions attemptToLoadAzureProxy(Configuration configuration, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? 
proxyUrl.getDefaultPort() : proxyUrl.getPort(); ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(proxyUrl.getHost(), port)); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. */ private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, String type) { if (!Boolean.parseBoolean(configuration.get(JAVA_PROXY_PREREQUISITE))) { return null; } String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(host, port)); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." 
+ JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ return sanitizeNonProxyHosts(nonProxyHostsString.split("\\|")); } /* * Helper function that sanitizes non-proxy hosts into a Pattern safe string. */ private static String sanitizeNonProxyHosts(String[] nonProxyHosts) { for (int i = 0; i < nonProxyHosts.length; i++) { /* * Non-proxy hosts are allowed to begin and end with '*' but this is an invalid value for a pattern, so we * need to qualify the quantifier with the match all '.' character. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether Java environment proxy configurations are allowed to be used. */ private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private static final List<Function<Configuration, ProxyOptions>> ENVIRONMENT_LOAD_ORDER = Arrays.asList( configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTPS_PROXY), configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTP_PROXY), configuration -> attemptToLoadJavaProxy(configuration, HTTPS), configuration -> attemptToLoadJavaProxy(configuration, HTTP) ); private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. 
* * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. * * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. 
* * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; for (Function<Configuration, ProxyOptions> loader : ENVIRONMENT_LOAD_ORDER) { ProxyOptions proxyOptions = loader.apply(proxyConfiguration); if (proxyOptions != null) { return proxyOptions; } } return null; } private static ProxyOptions attemptToLoadAzureProxy(Configuration configuration, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? 
proxyUrl.getDefaultPort() : proxyUrl.getPort(); ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(proxyUrl.getHost(), port)); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. */ private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, String type) { if (!Boolean.parseBoolean(configuration.get(JAVA_PROXY_PREREQUISITE))) { return null; } String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(host, port)); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." 
+ JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ String[] nonProxyHosts = nonProxyHostsString.split("\\|"); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'http.nonProxyHosts' values are allowed to begin and end with '*' but this is an invalid value for a * pattern, so we need to qualify the quantifier with the match all '.' character. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
Removed the method being called into as the expected format for `NO_PROXY` and `http.nonProxyHosts` differ enough to warrant separate code paths for sanitation.
private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ return sanitizeNonProxyHosts(noProxyString.split(",")); }
}
private static String sanitizeNoProxy(String noProxyString) { /* * The 'NO_PROXY' environment variable is expected to be delimited by ','. */ String[] nonProxyHosts = noProxyString.split(","); for (int i = 0; i < nonProxyHosts.length; i++) { /* * 'NO_PROXY' doesn't have a strongly standardized format, for now we are going to support values beginning * and ending with '*' or '.' to exclude an entire domain and will quote the value between the prefix and * suffix. In the future this may need to be updated to support more complex scenarios required by * 'NO_PROXY' users such as wild cards within the domain exclusion. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; /* * First check if the non-proxy host begins with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.startsWith(".*")) { prefixWildcard = ".*"; body = body.substring(2); } else if (body.startsWith("*") || body.startsWith(".")) { prefixWildcard = ".*"; body = body.substring(1); } /* * First check if the non-proxy host ends with a qualified quantifier and extract it from being quoted, * then check if it is a non-qualified quantifier and qualifier and extract it from being quoted. */ if (body.endsWith(".*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 2); } else if (body.endsWith("*") || body.endsWith(".")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); }
class ProxyOptions { private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class); private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'."; private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored."; /* * This indicates whether Java environment proxy configurations are allowed to be used. */ private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies"; /* * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and * 'http', the exception is 'http.nonProxyHosts' as it is used for both. */ private static final String JAVA_PROXY_HOST = "proxyHost"; private static final String JAVA_PROXY_PORT = "proxyPort"; private static final String JAVA_PROXY_USER = "proxyUser"; private static final String JAVA_PROXY_PASSWORD = "proxyPassword"; private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts"; private static final String HTTPS = "https"; private static final int DEFAULT_HTTPS_PORT = 443; private static final String HTTP = "http"; private static final int DEFAULT_HTTP_PORT = 80; private static final List<Function<Configuration, ProxyOptions>> ENVIRONMENT_LOAD_ORDER = Arrays.asList( configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTPS_PROXY), configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTP_PROXY), configuration -> attemptToLoadJavaProxy(configuration, HTTPS), configuration -> attemptToLoadJavaProxy(configuration, HTTP) ); private final InetSocketAddress address; private final Type type; private String username; private String password; private String nonProxyHosts; /** * Creates ProxyOptions. 
* * @param type the proxy type * @param address the proxy address (ip and port number) */ public ProxyOptions(Type type, InetSocketAddress address) { this.type = type; this.address = address; } /** * Set the proxy credentials. * * @param username proxy user name * @param password proxy password * @return the updated ProxyOptions object */ public ProxyOptions setCredentials(String username, String password) { this.username = Objects.requireNonNull(username, "'username' cannot be null."); this.password = Objects.requireNonNull(password, "'password' cannot be null."); return this; } /** * Sets the hosts which bypass the proxy. * * <p> * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy. * Individual host strings may contain regex characters such as {@code '*'}. * * @param nonProxyHosts Hosts that bypass the proxy. * @return the updated ProxyOptions object */ public ProxyOptions setNonProxyHosts(String nonProxyHosts) { this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts); return this; } /** * @return the address of the proxy. */ public InetSocketAddress getAddress() { return address; } /** * @return the type of the proxy. */ public Type getType() { return type; } /** * @return the proxy user name. */ public String getUsername() { return this.username; } /** * @return the proxy password. */ public String getPassword() { return this.password; } /** * @return the hosts that bypass the proxy. */ public String getNonProxyHosts() { return this.nonProxyHosts; } /** * Attempts to load a proxy from the environment. * * <p> * Environment configurations are loaded in this order: * <ol> * <li>Azure HTTPS</li> * <li>Azure HTTP</li> * <li>Java HTTPS</li> * <li>Java HTTP</li> * </ol> * * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred. 
* * <p> * {@code null} will be returned if no proxy was found in the environment. * * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment. If * {@code null} is passed then {@link Configuration * Configuration * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null} * will be returned. * @throws IllegalArgumentException If {@code configuration} is {@link Configuration */ public static ProxyOptions fromConfiguration(Configuration configuration) { if (configuration == Configuration.NONE) { throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE)); } Configuration proxyConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration() : configuration; for (Function<Configuration, ProxyOptions> loader : ENVIRONMENT_LOAD_ORDER) { ProxyOptions proxyOptions = loader.apply(proxyConfiguration); if (proxyOptions != null) { return proxyOptions; } } return null; } private static ProxyOptions attemptToLoadAzureProxy(Configuration configuration, String proxyProperty) { String proxyConfiguration = configuration.get(proxyProperty); if (CoreUtils.isNullOrEmpty(proxyConfiguration)) { return null; } try { URL proxyUrl = new URL(proxyConfiguration); int port = (proxyUrl.getPort() == -1) ? 
proxyUrl.getDefaultPort() : proxyUrl.getPort(); ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(proxyUrl.getHost(), port)); String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString); } String userInfo = proxyUrl.getUserInfo(); if (userInfo != null) { String[] usernamePassword = userInfo.split(":", 2); if (usernamePassword.length == 2) { try { proxyOptions.setCredentials( URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()), URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString()) ); } catch (UnsupportedEncodingException e) { return null; } } } return proxyOptions; } catch (MalformedURLException ex) { LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty); return null; } } /* * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string. */ private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, String type) { if (!Boolean.parseBoolean(configuration.get(JAVA_PROXY_PREREQUISITE))) { return null; } String host = configuration.get(type + "." + JAVA_PROXY_HOST); if (CoreUtils.isNullOrEmpty(host)) { return null; } int port; try { port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT)); } catch (NumberFormatException ex) { port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT; } ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(host, port)); String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS); if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) { proxyOptions.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHostsString); } String username = configuration.get(type + "." + JAVA_PROXY_USER); String password = configuration.get(type + "." 
+ JAVA_PROXY_PASSWORD); if (username != null && password != null) { proxyOptions.setCredentials(username, password); } return proxyOptions; } /* * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string. */ private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) { /* * The 'http.nonProxyHosts' system property is expected to be delimited by '|'. */ return sanitizeNonProxyHosts(nonProxyHostsString.split("\\|")); } /* * Helper function that sanitizes non-proxy hosts into a Pattern safe string. */ private static String sanitizeNonProxyHosts(String[] nonProxyHosts) { for (int i = 0; i < nonProxyHosts.length; i++) { /* * Non-proxy hosts are allowed to begin and end with '*' but this is an invalid value for a pattern, so we * need to qualify the quantifier with the match all '.' character. */ String prefixWildcard = ""; String suffixWildcard = ""; String body = nonProxyHosts[i]; if (body.startsWith("*")) { prefixWildcard = ".*"; body = body.substring(1); } if (body.endsWith("*")) { suffixWildcard = ".*"; body = body.substring(0, body.length() - 1); } /* * Replace the non-proxy host with the sanitized value. * * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where * without quoting the '.' in the string would be treated as the match any character instead of the literal * '.' character. */ nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard; } return String.join("|", nonProxyHosts); } /** * The type of the proxy. */ public enum Type { /** * HTTP proxy type. */ HTTP(Proxy.Type.HTTP), /** * SOCKS4 proxy type. */ SOCKS4(Proxy.Type.SOCKS), /** * SOCKS5 proxy type. */ SOCKS5(Proxy.Type.SOCKS); private final Proxy.Type proxyType; Type(Proxy.Type proxyType) { this.proxyType = proxyType; } /** * Get the {@link Proxy.Type} equivalent of this type. * * @return the proxy type */ public Proxy.Type toProxyType() { return proxyType; } } }
/**
 * Represents the proxy configuration — type, address, credentials, and the hosts that
 * bypass the proxy — used when an HTTP client sends requests.
 */
class ProxyOptions {
    private static final ClientLogger LOGGER = new ClientLogger(ProxyOptions.class);
    private static final String INVALID_CONFIGURATION_MESSAGE = "'configuration' cannot be 'Configuration.NONE'.";
    private static final String INVALID_AZURE_PROXY_URL = "Configuration {} is an invalid URL and is being ignored.";

    /*
     * This indicates whether Java environment proxy configurations are allowed to be used.
     */
    private static final String JAVA_PROXY_PREREQUISITE = "java.net.useSystemProxies";

    /*
     * Java environment variables related to proxies. The protocol is removed since these are the same for 'https' and
     * 'http', the exception is 'http.nonProxyHosts' as it is used for both.
     */
    private static final String JAVA_PROXY_HOST = "proxyHost";
    private static final String JAVA_PROXY_PORT = "proxyPort";
    private static final String JAVA_PROXY_USER = "proxyUser";
    private static final String JAVA_PROXY_PASSWORD = "proxyPassword";
    private static final String JAVA_NON_PROXY_HOSTS = "http.nonProxyHosts";

    private static final String HTTPS = "https";
    private static final int DEFAULT_HTTPS_PORT = 443;
    private static final String HTTP = "http";
    private static final int DEFAULT_HTTP_PORT = 80;

    /*
     * Azure proxy configurations are preferred over Java proxy configurations, and HTTPS over HTTP,
     * hence this load order.
     */
    private static final List<Function<Configuration, ProxyOptions>> ENVIRONMENT_LOAD_ORDER = Arrays.asList(
        configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTPS_PROXY),
        configuration -> attemptToLoadAzureProxy(configuration, Configuration.PROPERTY_HTTP_PROXY),
        configuration -> attemptToLoadJavaProxy(configuration, HTTPS),
        configuration -> attemptToLoadJavaProxy(configuration, HTTP)
    );

    private final InetSocketAddress address;
    private final Type type;
    private String username;
    private String password;
    private String nonProxyHosts;

    /**
     * Creates ProxyOptions.
     *
     * @param type the proxy type
     * @param address the proxy address (ip and port number)
     */
    public ProxyOptions(Type type, InetSocketAddress address) {
        this.type = type;
        this.address = address;
    }

    /**
     * Set the proxy credentials.
     *
     * @param username proxy user name
     * @param password proxy password
     * @return the updated ProxyOptions object
     */
    public ProxyOptions setCredentials(String username, String password) {
        this.username = Objects.requireNonNull(username, "'username' cannot be null.");
        this.password = Objects.requireNonNull(password, "'password' cannot be null.");
        return this;
    }

    /**
     * Sets the hosts which bypass the proxy.
     *
     * <p>
     * The expected format of the passed string is a {@code '|'} delimited list of hosts which should bypass the proxy.
     * Individual host strings may contain regex characters such as {@code '*'}.
     *
     * @param nonProxyHosts Hosts that bypass the proxy.
     * @return the updated ProxyOptions object
     */
    public ProxyOptions setNonProxyHosts(String nonProxyHosts) {
        this.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHosts);
        return this;
    }

    /**
     * @return the address of the proxy.
     */
    public InetSocketAddress getAddress() {
        return address;
    }

    /**
     * @return the type of the proxy.
     */
    public Type getType() {
        return type;
    }

    /**
     * @return the proxy user name.
     */
    public String getUsername() {
        return this.username;
    }

    /**
     * @return the proxy password.
     */
    public String getPassword() {
        return this.password;
    }

    /**
     * @return the hosts that bypass the proxy.
     */
    public String getNonProxyHosts() {
        return this.nonProxyHosts;
    }

    /**
     * Attempts to load a proxy from the environment.
     *
     * <p>
     * Environment configurations are loaded in this order:
     * <ol>
     *     <li>Azure HTTPS</li>
     *     <li>Azure HTTP</li>
     *     <li>Java HTTPS</li>
     *     <li>Java HTTP</li>
     * </ol>
     *
     * Azure proxy configurations will be preferred over Java proxy configurations as they are more closely scoped to
     * the purpose of the SDK. Additionally, more secure protocols, HTTPS vs HTTP, will be preferred.
     *
     * <p>
     * {@code null} will be returned if no proxy was found in the environment.
     *
     * @param configuration The {@link Configuration} that is used to load proxy configurations from the environment.
     * If {@code null} is passed then {@link Configuration#getGlobalConfiguration()} will be used.
     * @return A {@link ProxyOptions} reflecting a proxy loaded from the environment, if no proxy is found {@code null}
     * will be returned.
     * @throws IllegalArgumentException If {@code configuration} is {@link Configuration#NONE}.
     */
    public static ProxyOptions fromConfiguration(Configuration configuration) {
        if (configuration == Configuration.NONE) {
            throw LOGGER.logExceptionAsWarning(new IllegalArgumentException(INVALID_CONFIGURATION_MESSAGE));
        }

        Configuration proxyConfiguration = (configuration == null)
            ? Configuration.getGlobalConfiguration()
            : configuration;

        for (Function<Configuration, ProxyOptions> loader : ENVIRONMENT_LOAD_ORDER) {
            ProxyOptions proxyOptions = loader.apply(proxyConfiguration);
            if (proxyOptions != null) {
                return proxyOptions;
            }
        }

        return null;
    }

    private static ProxyOptions attemptToLoadAzureProxy(Configuration configuration, String proxyProperty) {
        String proxyConfiguration = configuration.get(proxyProperty);

        // No proxy configuration setup.
        if (CoreUtils.isNullOrEmpty(proxyConfiguration)) {
            return null;
        }

        try {
            URL proxyUrl = new URL(proxyConfiguration);
            // When no port is explicitly set fall back to the protocol's default port.
            int port = (proxyUrl.getPort() == -1) ? proxyUrl.getDefaultPort() : proxyUrl.getPort();
            ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP,
                new InetSocketAddress(proxyUrl.getHost(), port));

            String nonProxyHostsString = configuration.get(Configuration.PROPERTY_NO_PROXY);
            if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) {
                proxyOptions.nonProxyHosts = sanitizeNoProxy(nonProxyHostsString);
            }

            String userInfo = proxyUrl.getUserInfo();
            if (userInfo != null) {
                String[] usernamePassword = userInfo.split(":", 2);
                if (usernamePassword.length == 2) {
                    try {
                        // The user info may be percent-encoded in the URL, decode before use.
                        proxyOptions.setCredentials(
                            URLDecoder.decode(usernamePassword[0], StandardCharsets.UTF_8.toString()),
                            URLDecoder.decode(usernamePassword[1], StandardCharsets.UTF_8.toString())
                        );
                    } catch (UnsupportedEncodingException e) {
                        return null;
                    }
                }
            }

            return proxyOptions;
        } catch (MalformedURLException ex) {
            LOGGER.warning(INVALID_AZURE_PROXY_URL, proxyProperty);
            return null;
        }
    }

    /*
     * Helper function that sanitizes 'NO_PROXY' into a Pattern safe string.
     */
    private static String sanitizeNoProxy(String noProxyString) {
        /*
         * The 'NO_PROXY' environment variable is expected to be delimited by ','.
         */
        return sanitizeNonProxyHosts(noProxyString.split(","));
    }

    private static ProxyOptions attemptToLoadJavaProxy(Configuration configuration, String type) {
        // Java requires an opt-in flag before its proxy system properties are honored.
        if (!Boolean.parseBoolean(configuration.get(JAVA_PROXY_PREREQUISITE))) {
            return null;
        }

        String host = configuration.get(type + "." + JAVA_PROXY_HOST);

        // No proxy configuration setup.
        if (CoreUtils.isNullOrEmpty(host)) {
            return null;
        }

        int port;
        try {
            port = Integer.parseInt(configuration.get(type + "." + JAVA_PROXY_PORT));
        } catch (NumberFormatException ex) {
            // Missing or malformed port, use the protocol's default port.
            port = HTTPS.equals(type) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT;
        }

        ProxyOptions proxyOptions = new ProxyOptions(Type.HTTP, new InetSocketAddress(host, port));

        String nonProxyHostsString = configuration.get(JAVA_NON_PROXY_HOSTS);
        if (!CoreUtils.isNullOrEmpty(nonProxyHostsString)) {
            proxyOptions.nonProxyHosts = sanitizeJavaHttpNonProxyHosts(nonProxyHostsString);
        }

        String username = configuration.get(type + "." + JAVA_PROXY_USER);
        String password = configuration.get(type + "." + JAVA_PROXY_PASSWORD);

        if (username != null && password != null) {
            proxyOptions.setCredentials(username, password);
        }

        return proxyOptions;
    }

    /*
     * Helper function that sanitizes 'http.nonProxyHosts' into a Pattern safe string.
     */
    private static String sanitizeJavaHttpNonProxyHosts(String nonProxyHostsString) {
        /*
         * The 'http.nonProxyHosts' system property is expected to be delimited by '|'.
         */
        return sanitizeNonProxyHosts(nonProxyHostsString.split("\\|"));
    }

    /*
     * Helper function that sanitizes non-proxy hosts into a Pattern safe string.
     */
    private static String sanitizeNonProxyHosts(String[] nonProxyHosts) {
        for (int i = 0; i < nonProxyHosts.length; i++) {
            /*
             * Non-proxy hosts are allowed to begin and end with '*' but this is an invalid value for a pattern, so we
             * need to qualify the quantifier with the match all '.' character.
             */
            String prefixWildcard = "";
            String suffixWildcard = "";
            String body = nonProxyHosts[i];

            if (body.startsWith("*")) {
                prefixWildcard = ".*";
                body = body.substring(1);
            }

            if (body.endsWith("*")) {
                suffixWildcard = ".*";
                body = body.substring(0, body.length() - 1);
            }

            /*
             * Replace the non-proxy host with the sanitized value.
             *
             * The body of the non-proxy host is quoted to handle scenarios such a '127.0.0.1' or '*.azure.com' where
             * without quoting the '.' in the string would be treated as the match any character instead of the literal
             * '.' character.
             */
            nonProxyHosts[i] = prefixWildcard + Pattern.quote(body) + suffixWildcard;
        }

        return String.join("|", nonProxyHosts);
    }

    /**
     * The type of the proxy.
     */
    public enum Type {
        /**
         * HTTP proxy type.
         */
        HTTP(Proxy.Type.HTTP),

        /**
         * SOCKS4 proxy type.
         */
        SOCKS4(Proxy.Type.SOCKS),

        /**
         * SOCKS5 proxy type.
         */
        SOCKS5(Proxy.Type.SOCKS);

        private final Proxy.Type proxyType;

        Type(Proxy.Type proxyType) {
            this.proxyType = proxyType;
        }

        /**
         * Get the {@link Proxy.Type} equivalent of this type.
         *
         * @return the proxy type
         */
        public Proxy.Type toProxyType() {
            return proxyType;
        }
    }
}
This is not updated yet.
/**
 * Creates the {@link ClientRegistration} for the given registration id, ensuring the
 * "openid" and "profile" scopes required by on-demand authorization are present.
 *
 * @param id the client registration id
 * @param authz the authorization properties supplying the configured scopes
 * @return the built client registration
 */
private ClientRegistration createClientBuilder(String id, AuthorizationProperties authz) {
    ClientRegistration.Builder result = createClientBuilder(id);
    List<String> scopes = authz.getScopes();
    if (authz.isOnDemand()) {
        // On-demand sign-in requires the OIDC login scopes; add them if absent.
        if (!scopes.contains("openid")) {
            scopes.add("openid");
        }
        if (!scopes.contains("profile")) {
            scopes.add("profile");
        }
    }
    // Use the locally accumulated list so the scopes added above are not lost
    // (passing authz.getScopes() again would drop them if it returns a fresh list).
    result.scope(scopes);
    return result.build();
}
}
/**
 * Builds the {@link ClientRegistration} for the given registration id. When the
 * authorization is on-demand, the "openid" and "profile" scopes are appended if missing.
 *
 * @param id the client registration id
 * @param authz the authorization properties supplying the configured scopes
 * @return the built client registration
 */
private ClientRegistration createClientBuilder(String id, AuthorizationProperties authz) {
    ClientRegistration.Builder builder = createClientBuilder(id);
    List<String> scopes = authz.getScopes();
    if (authz.isOnDemand()) {
        // OIDC login scopes are mandatory for on-demand authorization.
        for (String requiredScope : new String[] { "openid", "profile" }) {
            if (!scopes.contains(requiredScope)) {
                scopes.add(requiredScope);
            }
        }
    }
    builder.scope(scopes);
    return builder.build();
}
class }) public AADWebAppClientRegistrationRepository clientRegistrationRepository() { return new AADWebAppClientRegistrationRepository( createDefaultClient(), createAuthzClients(), properties); }
class }) public AADWebAppClientRegistrationRepository clientRegistrationRepository() { return new AADWebAppClientRegistrationRepository( createDefaultClient(), createAuthzClients(), properties); }
Sorry, I missed that.
/**
 * Creates the {@link ClientRegistration} for the given registration id, ensuring the
 * "openid" and "profile" scopes required by on-demand authorization are present.
 *
 * @param id the client registration id
 * @param authz the authorization properties supplying the configured scopes
 * @return the built client registration
 */
private ClientRegistration createClientBuilder(String id, AuthorizationProperties authz) {
    ClientRegistration.Builder result = createClientBuilder(id);
    List<String> scopes = authz.getScopes();
    if (authz.isOnDemand()) {
        // On-demand sign-in requires the OIDC login scopes; add them if absent.
        if (!scopes.contains("openid")) {
            scopes.add("openid");
        }
        if (!scopes.contains("profile")) {
            scopes.add("profile");
        }
    }
    // Use the locally accumulated list so the scopes added above are not lost
    // (passing authz.getScopes() again would drop them if it returns a fresh list).
    result.scope(scopes);
    return result.build();
}
}
// Builds the ClientRegistration for the given id. For on-demand authorization the
// "openid" and "profile" OIDC scopes are appended to the configured scopes when missing.
private ClientRegistration createClientBuilder(String id, AuthorizationProperties authz) {
    ClientRegistration.Builder result = createClientBuilder(id);
    List<String> scopes = authz.getScopes();
    if (authz.isOnDemand()) {
        // On-demand flows sign the user in lazily, which requires the OIDC login scopes.
        if (!scopes.contains("openid")) {
            scopes.add("openid");
        }
        if (!scopes.contains("profile")) {
            scopes.add("profile");
        }
    }
    // 'scopes' is the locally accumulated list, including any scopes added above.
    result.scope(scopes);
    return result.build();
}
class }) public AADWebAppClientRegistrationRepository clientRegistrationRepository() { return new AADWebAppClientRegistrationRepository( createDefaultClient(), createAuthzClients(), properties); }
class }) public AADWebAppClientRegistrationRepository clientRegistrationRepository() { return new AADWebAppClientRegistrationRepository( createDefaultClient(), createAuthzClients(), properties); }
done
/**
 * Creates the {@link ClientRegistration} for the given registration id, ensuring the
 * "openid" and "profile" scopes required by on-demand authorization are present.
 *
 * @param id the client registration id
 * @param authz the authorization properties supplying the configured scopes
 * @return the built client registration
 */
private ClientRegistration createClientBuilder(String id, AuthorizationProperties authz) {
    ClientRegistration.Builder result = createClientBuilder(id);
    List<String> scopes = authz.getScopes();
    if (authz.isOnDemand()) {
        // On-demand sign-in requires the OIDC login scopes; add them if absent.
        if (!scopes.contains("openid")) {
            scopes.add("openid");
        }
        if (!scopes.contains("profile")) {
            scopes.add("profile");
        }
    }
    // Use the locally accumulated list so the scopes added above are not lost
    // (passing authz.getScopes() again would drop them if it returns a fresh list).
    result.scope(scopes);
    return result.build();
}
}
// Builds the ClientRegistration for the given id. For on-demand authorization the
// "openid" and "profile" OIDC scopes are appended to the configured scopes when missing.
private ClientRegistration createClientBuilder(String id, AuthorizationProperties authz) {
    ClientRegistration.Builder result = createClientBuilder(id);
    List<String> scopes = authz.getScopes();
    if (authz.isOnDemand()) {
        // On-demand flows sign the user in lazily, which requires the OIDC login scopes.
        if (!scopes.contains("openid")) {
            scopes.add("openid");
        }
        if (!scopes.contains("profile")) {
            scopes.add("profile");
        }
    }
    // 'scopes' is the locally accumulated list, including any scopes added above.
    result.scope(scopes);
    return result.build();
}
class }) public AADWebAppClientRegistrationRepository clientRegistrationRepository() { return new AADWebAppClientRegistrationRepository( createDefaultClient(), createAuthzClients(), properties); }
class }) public AADWebAppClientRegistrationRepository clientRegistrationRepository() { return new AADWebAppClientRegistrationRepository( createDefaultClient(), createAuthzClients(), properties); }
Why is this null check needed here?
/**
 * Rethrows {@code ex} when it represents a condition the read loop must not continue
 * past: the partition key range is gone, splitting, or migrating, or the backend
 * flagged a request validation failure (header value {@code 1}).
 *
 * <p>The caller invokes this only when a response exception is present (see the
 * {@code responseException != null} guard in createAndRecordStoreResult), so the
 * previously present defensive null check was redundant and has been removed.
 *
 * @param ex the non-null exception recorded for the store response
 */
static void verifyCanContinueOnException(CosmosException ex) {
    // Partition topology changes always abort the current read attempt.
    if (ex instanceof PartitionKeyRangeGoneException
        || ex instanceof PartitionKeyRangeIsSplittingException
        || ex instanceof PartitionIsMigratingException) {
        throw ex;
    }

    String value = ex.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE);
    if (Strings.isNullOrWhiteSpace(value)) {
        return;
    }

    // A header value of exactly 1 marks a request validation failure; rethrow.
    Integer result = Integers.tryParse(value);
    if (result != null && result == 1) {
        throw ex;
    }
}
if (ex == null) {
// Rethrows the exception when the read loop must not continue past it: partition key
// range gone/splitting/migrating, or a backend request-validation failure (header
// value == 1). Callers only invoke this with a non-null exception, so no null check
// is performed here.
static void verifyCanContinueOnException(CosmosException ex) {
    if (ex instanceof PartitionKeyRangeGoneException) {
        throw ex;
    }

    if (ex instanceof PartitionKeyRangeIsSplittingException) {
        throw ex;
    }

    if (ex instanceof PartitionIsMigratingException) {
        throw ex;
    }

    String value = ex.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_VALIDATION_FAILURE);
    if (Strings.isNullOrWhiteSpace(value)) {
        return;
    }

    // tryParse returns null for non-numeric values; only an exact 1 triggers a rethrow.
    Integer result = Integers.tryParse(value);
    if (result != null && result == 1) {
        throw ex;
    }
    return;
}
class StoreReader { private final Logger logger = LoggerFactory.getLogger(StoreReader.class); private final TransportClient transportClient; private final AddressSelector addressSelector; private final ISessionContainer sessionContainer; private String lastReadAddress; public StoreReader( TransportClient transportClient, AddressSelector addressSelector, ISessionContainer sessionContainer) { this.transportClient = transportClient; this.addressSelector = addressSelector; this.sessionContainer = sessionContainer; } public Mono<List<StoreResult>> readMultipleReplicaAsync( RxDocumentServiceRequest entity, boolean includePrimary, int replicaCountToRead, boolean requiresValidLsn, boolean useSessionToken, ReadMode readMode) { return readMultipleReplicaAsync(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, false, false); } /** * Makes requests to multiple replicas at once and returns responses * @param entity RxDocumentServiceRequest * @param includePrimary flag to indicate whether to indicate primary replica in the reads * @param replicaCountToRead number of replicas to read from * @param requiresValidLsn flag to indicate whether a valid lsn is required to consider a response as valid * @param useSessionToken flag to indicate whether to use session token * @param readMode READ getMode * @param checkMinLSN set minimum required session lsn * @param forceReadAll reads from all available replicas to gather result from readsToRead number of replicas * @return ReadReplicaResult which indicates the LSN and whether Quorum was Met / Not Met etc */ public Mono<List<StoreResult>> readMultipleReplicaAsync( RxDocumentServiceRequest entity, boolean includePrimary, int replicaCountToRead, boolean requiresValidLsn, boolean useSessionToken, ReadMode readMode, boolean checkMinLSN, boolean forceReadAll) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Mono.error(new GoneException()); } String originalSessionToken = 
entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); if (entity.requestContext.cosmosDiagnostics == null) { entity.requestContext.cosmosDiagnostics = entity.createCosmosDiagnostics(); } Mono<ReadReplicaResult> readQuorumResultObs = this.readMultipleReplicasInternalAsync( entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, checkMinLSN, forceReadAll); return readQuorumResultObs.flatMap(readQuorumResult -> { if (entity.requestContext.performLocalRefreshOnGoneException && readQuorumResult.retryWithForceRefresh && !entity.requestContext.forceRefreshAddressCache) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Mono.error(new GoneException()); } entity.requestContext.forceRefreshAddressCache = true; return this.readMultipleReplicasInternalAsync( entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, false /*checkMinLSN*/, forceReadAll) .map(r -> r.responses); } else { return Mono.just(readQuorumResult.responses); } }).flux().doAfterTerminate(() -> SessionTokenHelper.setOriginalSessionToken(entity, originalSessionToken)).single(); } private Flux<ReadReplicaResult> earlyResultIfNotEnoughReplicas(List<Uri> replicaAddresses, RxDocumentServiceRequest request, int replicaCountToRead) { if (replicaAddresses.size() < replicaCountToRead) { if (!request.requestContext.forceRefreshAddressCache) { return Flux.just(new ReadReplicaResult(true /*retryWithForceRefresh*/, Collections.emptyList())); } else { return Flux.just(new ReadReplicaResult(false /*retryWithForceRefresh*/, Collections.emptyList())); } } else { return Flux.empty(); } } private Flux<StoreResult> toStoreResult(RxDocumentServiceRequest request, Pair<Flux<StoreResponse>, Uri> storeRespAndURI, ReadMode readMode, boolean requiresValidLsn) { return storeRespAndURI.getLeft() .flatMap(storeResponse -> { try { StoreResult storeResult = this.createAndRecordStoreResult( request, storeResponse, null, requiresValidLsn, readMode != 
ReadMode.Strong, storeRespAndURI.getRight()); BridgeInternal.getContactedReplicas(request.requestContext.cosmosDiagnostics).add(storeRespAndURI.getRight().getURI()); return Flux.just(storeResult); } catch (Exception e) { return Flux.error(e); } } ).onErrorResume(t -> { Throwable unwrappedException = Exceptions.unwrap(t); try { logger.debug("Exception is thrown while doing readMany: ", unwrappedException); Exception storeException = Utils.as(unwrappedException, Exception.class); if (storeException == null) { return Flux.error(unwrappedException); } StoreResult storeResult = this.createAndRecordStoreResult( request, null, storeException, requiresValidLsn, readMode != ReadMode.Strong, storeRespAndURI.getRight()); if (storeException instanceof TransportException) { BridgeInternal.getFailedReplicas(request.requestContext.cosmosDiagnostics).add(storeRespAndURI.getRight().getURI()); } return Flux.just(storeResult); } catch (Exception e) { return Flux.error(e); } }); } private Flux<List<StoreResult>> readFromReplicas(List<StoreResult> resultCollector, List<Uri> resolveApiResults, final AtomicInteger replicasToRead, RxDocumentServiceRequest entity, boolean includePrimary, int replicaCountToRead, boolean requiresValidLsn, boolean useSessionToken, ReadMode readMode, boolean checkMinLSN, boolean forceReadAll, final MutableVolatile<ISessionToken> requestSessionToken, final MutableVolatile<Boolean> hasGoneException, boolean enforceSessionCheck, final MutableVolatile<ReadReplicaResult> shortCircut) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Flux.error(new GoneException()); } List<Pair<Flux<StoreResponse>, Uri>> readStoreTasks = new ArrayList<>(); int uriIndex = StoreReader.generateNextRandom(resolveApiResults.size()); while (resolveApiResults.size() > 0) { uriIndex = uriIndex % resolveApiResults.size(); Uri uri = resolveApiResults.get(uriIndex); Pair<Mono<StoreResponse>, Uri> res; try { res = this.readFromStoreAsync(resolveApiResults.get(uriIndex), entity); } 
catch (Exception e) { res = Pair.of(Mono.error(e), uri); } readStoreTasks.add(Pair.of(res.getLeft().flux(), res.getRight())); resolveApiResults.remove(uriIndex); if (!forceReadAll && readStoreTasks.size() == replicasToRead.get()) { break; } } replicasToRead.set(readStoreTasks.size() >= replicasToRead.get() ? 0 : replicasToRead.get() - readStoreTasks.size()); List<Flux<StoreResult>> storeResult = readStoreTasks .stream() .map(item -> toStoreResult(entity, item, readMode, requiresValidLsn)) .collect(Collectors.toList()); Flux<StoreResult> allStoreResults = Flux.merge(storeResult); return allStoreResults.collectList().onErrorResume(e -> { if (Exceptions.isMultiple(e)) { logger.info("Captured composite exception"); List<Throwable> exceptions = Exceptions.unwrapMultiple(e); assert !exceptions.isEmpty(); return Mono.error(exceptions.get(0)); } return Mono.error(e); }).map(newStoreResults -> { for (StoreResult srr : newStoreResults) { if (srr.isValid) { try { if (requestSessionToken.v == null || (srr.sessionToken != null && requestSessionToken.v.isValid(srr.sessionToken)) || (!enforceSessionCheck && !srr.isNotFoundException)) { resultCollector.add(srr); } } catch (Exception e) { } } hasGoneException.v = hasGoneException.v || (srr.isGoneException && !srr.isInvalidPartitionException); if (resultCollector.size() >= replicaCountToRead) { if (hasGoneException.v && !entity.requestContext.performedBackgroundAddressRefresh) { this.startBackgroundAddressRefresh(entity); entity.requestContext.performedBackgroundAddressRefresh = true; } shortCircut.v = new ReadReplicaResult(false, resultCollector); replicasToRead.set(0); return resultCollector; } replicasToRead.set(replicaCountToRead - resultCollector.size()); } return resultCollector; }).flux(); } private ReadReplicaResult createReadReplicaResult(List<StoreResult> responseResult, int replicaCountToRead, int resolvedAddressCount, boolean hasGoneException, RxDocumentServiceRequest entity) { if (responseResult.size() < 
replicaCountToRead) { if (logger.isDebugEnabled()) { logger.debug("Could not get quorum number of responses. " + "ValidResponsesReceived: {} ResponsesExpected: {}, ResolvedAddressCount: {}, ResponsesString: {}", responseResult.size(), replicaCountToRead, resolvedAddressCount, String.join(";", responseResult.stream().map(r -> r.toString()).collect(Collectors.toList()))); } if (hasGoneException) { if (!entity.requestContext.performLocalRefreshOnGoneException) { throw new GoneException(); } else if (!entity.requestContext.forceRefreshAddressCache) { return new ReadReplicaResult(true, responseResult); } } } return new ReadReplicaResult(false, responseResult); } /** * Makes requests to multiple replicas at once and returns responses * @param entity DocumentServiceRequest * @param includePrimary flag to indicate whether to indicate primary replica in the reads * @param replicaCountToRead number of replicas to read from * @param requiresValidLsn flag to indicate whether a valid lsn is required to consider a response as valid * @param useSessionToken flag to indicate whether to use session token * @param readMode READ getMode * @param checkMinLSN set minimum required session lsn * @param forceReadAll will read from all available replicas to put together result from readsToRead number of replicas * @return ReadReplicaResult which indicates the LSN and whether Quorum was Met / Not Met etc */ private Mono<ReadReplicaResult> readMultipleReplicasInternalAsync(RxDocumentServiceRequest entity, boolean includePrimary, int replicaCountToRead, boolean requiresValidLsn, boolean useSessionToken, ReadMode readMode, boolean checkMinLSN, boolean forceReadAll) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Mono.error(new GoneException()); } String requestedCollectionId = null; if (entity.forceNameCacheRefresh) { requestedCollectionId = entity.requestContext.resolvedCollectionRid; } Mono<List<Uri>> resolveApiResultsObs = this.addressSelector.resolveAllUriAsync( entity, 
includePrimary, entity.requestContext.forceRefreshAddressCache); if (!StringUtils.isEmpty(requestedCollectionId) && !StringUtils.isEmpty(entity.requestContext.resolvedCollectionRid)) { if (!requestedCollectionId.equals(entity.requestContext.resolvedCollectionRid)) { this.sessionContainer.clearTokenByResourceId(requestedCollectionId); } } return resolveApiResultsObs.flux() .map(list -> Collections.synchronizedList(new ArrayList<>(list))) .flatMap( resolveApiResults -> { try { MutableVolatile<ISessionToken> requestSessionToken = new MutableVolatile<>(); if (useSessionToken) { SessionTokenHelper.setPartitionLocalSessionToken(entity, this.sessionContainer); if (checkMinLSN) { requestSessionToken.v = entity.requestContext.sessionToken; } } else { entity.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } Flux<ReadReplicaResult> y = earlyResultIfNotEnoughReplicas(resolveApiResults, entity, replicaCountToRead); return y.switchIfEmpty( Flux.defer(() -> { List<StoreResult> storeResultList = Collections.synchronizedList(new ArrayList<>()); AtomicInteger replicasToRead = new AtomicInteger(replicaCountToRead); boolean enforceSessionCheck = true; MutableVolatile<Boolean> hasGoneException = new MutableVolatile<>(false); MutableVolatile<ReadReplicaResult> shortCircuitResult = new MutableVolatile<>(); return Flux.defer(() -> readFromReplicas( storeResultList, resolveApiResults, replicasToRead, entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, checkMinLSN, forceReadAll, requestSessionToken, hasGoneException, enforceSessionCheck, shortCircuitResult)) .repeat() .takeUntil(x -> { if (replicasToRead.get() > 0 && resolveApiResults.size() > 0) { return false; } else { return true; } }) .thenMany( Flux.defer(() -> { try { return Flux.just(createReadReplicaResult(storeResultList, replicaCountToRead, resolveApiResults.size(), hasGoneException.v, entity)); } catch (Exception e) { return Flux.error(e); } } )); })); } catch (Exception e) { 
return Flux.error(e); } } ).single(); } public Mono<StoreResult> readPrimaryAsync( RxDocumentServiceRequest entity, boolean requiresValidLsn, boolean useSessionToken) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Mono.error(new GoneException()); } String originalSessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); if (entity.requestContext.cosmosDiagnostics == null) { entity.requestContext.cosmosDiagnostics = entity.createCosmosDiagnostics(); } return this.readPrimaryInternalAsync( entity, requiresValidLsn, useSessionToken).flatMap( readQuorumResult -> { if (entity.requestContext.performLocalRefreshOnGoneException && readQuorumResult.retryWithForceRefresh && !entity.requestContext.forceRefreshAddressCache) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Mono.error(new GoneException()); } entity.requestContext.forceRefreshAddressCache = true; return this.readPrimaryInternalAsync(entity, requiresValidLsn, useSessionToken); } else { return Mono.just(readQuorumResult); } } ).flatMap(readQuorumResult -> { if (readQuorumResult.responses.size() == 0) { return Mono.error(new GoneException(RMResources.Gone)); } return Mono.just(readQuorumResult.responses.get(0)); }).doOnEach(arg -> { try { SessionTokenHelper.setOriginalSessionToken(entity, originalSessionToken); } catch (Throwable throwable) { logger.error("Unexpected failure in handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable); } } ); } private Mono<ReadReplicaResult> readPrimaryInternalAsync( RxDocumentServiceRequest entity, boolean requiresValidLsn, boolean useSessionToken) { if (entity.requestContext.timeoutHelper.isElapsed()) { return Mono.error(new GoneException()); } Mono<Uri> primaryUriObs = this.addressSelector.resolvePrimaryUriAsync( entity, entity.requestContext.forceRefreshAddressCache); Mono<StoreResult> storeResultObs = primaryUriObs.flatMap( primaryUri -> { try { if (useSessionToken) { 
SessionTokenHelper.setPartitionLocalSessionToken(entity, this.sessionContainer); } else { entity.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN); } Pair<Mono<StoreResponse>, Uri> storeResponseObsAndUri = this.readFromStoreAsync(primaryUri, entity); return storeResponseObsAndUri.getLeft().flatMap( storeResponse -> { try { StoreResult storeResult = this.createAndRecordStoreResult( entity, storeResponse != null ? storeResponse : null, null, requiresValidLsn, true, storeResponse != null ? storeResponseObsAndUri.getRight() : null); return Mono.just(storeResult); } catch (CosmosException e) { return Mono.error(e); } } ); } catch (CosmosException e) { return Mono.error(e); } } ).onErrorResume(t -> { Throwable unwrappedException = Exceptions.unwrap(t); logger.debug("Exception is thrown while doing READ Primary", unwrappedException); Exception storeTaskException = Utils.as(unwrappedException, Exception.class); if (storeTaskException == null) { return Mono.error(unwrappedException); } try { StoreResult storeResult = this.createAndRecordStoreResult( entity, null, storeTaskException, requiresValidLsn, true, null); return Mono.just(storeResult); } catch (CosmosException e) { return Mono.error(e); } }); return storeResultObs.map(storeResult -> { if (storeResult.isGoneException && !storeResult.isInvalidPartitionException) { return new ReadReplicaResult(true, Collections.emptyList()); } return new ReadReplicaResult(false, Collections.singletonList(storeResult)); }); } private Pair<Mono<StoreResponse>, Uri> readFromStoreAsync( Uri physicalAddress, RxDocumentServiceRequest request) { if (request.requestContext.timeoutHelper.isElapsed()) { throw new GoneException(); } String ifNoneMatch = request.getHeaders().get(HttpConstants.HttpHeaders.IF_NONE_MATCH); String continuation = null; String maxPageSize = null; this.lastReadAddress = physicalAddress.toString(); if (request.getOperationType() == OperationType.ReadFeed || request.getOperationType() == OperationType.Query) { 
continuation = request.getHeaders().get(HttpConstants.HttpHeaders.CONTINUATION); maxPageSize = request.getHeaders().get(HttpConstants.HttpHeaders.PAGE_SIZE); if (continuation != null && continuation.contains(";")) { String[] parts = StringUtils.split(continuation, ';'); if (parts.length < 3) { throw new BadRequestException(String.format( RMResources.InvalidHeaderValue, continuation, HttpConstants.HttpHeaders.CONTINUATION)); } continuation = parts[0]; } request.setContinuation(continuation); } switch (request.getOperationType()) { case Read: case Head: { Mono<StoreResponse> storeResponseObs = this.transportClient.invokeResourceOperationAsync( physicalAddress, request); return Pair.of(storeResponseObs, physicalAddress); } case ReadFeed: case HeadFeed: case Query: case SqlQuery: case ExecuteJavaScript: { Mono<StoreResponse> storeResponseObs = StoreReader.completeActivity(this.transportClient.invokeResourceOperationAsync( physicalAddress, request), null); return Pair.of(storeResponseObs, physicalAddress); } default: throw new IllegalStateException(String.format("Unexpected operation setType {%s}", request.getOperationType())); } } private static Mono<StoreResponse> completeActivity(Mono<StoreResponse> task, Object activity) { return task; } StoreResult createAndRecordStoreResult( RxDocumentServiceRequest request, StoreResponse storeResponse, Exception responseException, boolean requiresValidLsn, boolean useLocalLSNBasedHeaders, Uri storePhysicalAddress) { StoreResult storeResult = this.createStoreResult(storeResponse, responseException, requiresValidLsn, useLocalLSNBasedHeaders, storePhysicalAddress); try { BridgeInternal.recordResponse(request.requestContext.cosmosDiagnostics, request, storeResult); if (request.requestContext.requestChargeTracker != null) { request.requestContext.requestChargeTracker.addCharge(storeResult.requestCharge); } } catch (Exception e){ logger.error("Unexpected failure while recording response", e); } if (responseException !=null) { 
verifyCanContinueOnException(storeResult.getException()); } return storeResult; } StoreResult createStoreResult(StoreResponse storeResponse, Exception responseException, boolean requiresValidLsn, boolean useLocalLSNBasedHeaders, Uri storePhysicalAddress) { if (responseException == null) { String headerValue = null; long quorumAckedLSN = -1; int currentReplicaSetSize = -1; int currentWriteQuorum = -1; long globalCommittedLSN = -1; int numberOfReadRegions = -1; long itemLSN = -1; if ((headerValue = storeResponse.getHeaderValue( useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN : WFConstants.BackendHeaders.QUORUM_ACKED_LSN)) != null) { quorumAckedLSN = Long.parseLong(headerValue); } if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE)) != null) { currentReplicaSetSize = Integer.parseInt(headerValue); } if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.CURRENT_WRITE_QUORUM)) != null) { currentWriteQuorum = Integer.parseInt(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) { numberOfReadRegions = Integer.parseInt(headerValue); } if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) { globalCommittedLSN = Long.parseLong(headerValue); } if ((headerValue = storeResponse.getHeaderValue( useLocalLSNBasedHeaders ? 
WFConstants.BackendHeaders.ITEM_LOCAL_LSN : WFConstants.BackendHeaders.ITEM_LSN)) != null) { itemLSN = Long.parseLong(headerValue); } long lsn = -1; if (useLocalLSNBasedHeaders) { if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.LOCAL_LSN)) != null) { lsn = Long.parseLong(headerValue); } } else { lsn = storeResponse.getLSN(); } ISessionToken sessionToken = null; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } return new StoreResult( /* storeResponse: */storeResponse, /* exception: */ null, /* partitionKeyRangeId: */ storeResponse.getPartitionKeyRangeId(), /* lsn: */ lsn, /* quorumAckedLsn: */ quorumAckedLSN, /* getRequestCharge: */ requestCharge, /* currentReplicaSetSize: */ currentReplicaSetSize, /* currentWriteQuorum: */ currentWriteQuorum, /* isValid: */true, /* storePhysicalAddress: */ storePhysicalAddress, /* globalCommittedLSN: */ globalCommittedLSN, /* numberOfReadRegions: */ numberOfReadRegions, /* itemLSN: */ itemLSN, /* getSessionToken: */ sessionToken); } else { Throwable unwrappedResponseExceptions = Exceptions.unwrap(responseException); CosmosException cosmosException = Utils.as(unwrappedResponseExceptions, CosmosException.class); if (cosmosException != null) { long quorumAckedLSN = -1; int currentReplicaSetSize = -1; int currentWriteQuorum = -1; long globalCommittedLSN = -1; int numberOfReadRegions = -1; String headerValue = cosmosException.getResponseHeaders().get(useLocalLSNBasedHeaders ? 
WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN : WFConstants.BackendHeaders.QUORUM_ACKED_LSN); if (!Strings.isNullOrEmpty(headerValue)) { quorumAckedLSN = Long.parseLong(headerValue); } headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE); if (!Strings.isNullOrEmpty(headerValue)) { currentReplicaSetSize = Integer.parseInt(headerValue); } headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.CURRENT_WRITE_QUORUM); if (!Strings.isNullOrEmpty(headerValue)) { currentReplicaSetSize = Integer.parseInt(headerValue); } double requestCharge = 0; headerValue = cosmosException.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE); if (!Strings.isNullOrEmpty(headerValue)) { requestCharge = Double.parseDouble(headerValue); } headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS); if (!Strings.isNullOrEmpty(headerValue)) { numberOfReadRegions = Integer.parseInt(headerValue); } headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN); if (!Strings.isNullOrEmpty(headerValue)) { globalCommittedLSN = Integer.parseInt(headerValue); } long lsn = -1; if (useLocalLSNBasedHeaders) { headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.LOCAL_LSN); if (!Strings.isNullOrEmpty(headerValue)) { lsn = Long.parseLong(headerValue); } } else { lsn = BridgeInternal.getLSN(cosmosException); } ISessionToken sessionToken = null; headerValue = cosmosException.getResponseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN); if (!Strings.isNullOrEmpty(headerValue)) { sessionToken = SessionTokenHelper.parse(headerValue); } return new StoreResult( /* storeResponse: */ (StoreResponse) null, /* exception: */ cosmosException, /* partitionKeyRangeId: */BridgeInternal.getPartitionKeyRangeId(cosmosException), /* lsn: */ lsn, /* quorumAckedLsn: */ quorumAckedLSN, /* getRequestCharge: */ 
requestCharge, /* currentReplicaSetSize: */ currentReplicaSetSize, /* currentWriteQuorum: */ currentWriteQuorum, /* isValid: */!requiresValidLsn || ((cosmosException.getStatusCode() != HttpConstants.StatusCodes.GONE || isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE)) && lsn >= 0), /* storePhysicalAddress: */ storePhysicalAddress == null ? BridgeInternal.getRequestUri(cosmosException) : storePhysicalAddress, /* globalCommittedLSN: */ globalCommittedLSN, /* numberOfReadRegions: */ numberOfReadRegions, /* itemLSN: */ -1, sessionToken); } else { logger.error("Unexpected exception {} received while reading from store.", responseException.getMessage(), responseException); return new StoreResult( /* storeResponse: */ null, /* exception: */ new InternalServerErrorException(RMResources.InternalServerError), /* partitionKeyRangeId: */ (String) null, /* lsn: */ -1, /* quorumAckedLsn: */ -1, /* getRequestCharge: */ 0, /* currentReplicaSetSize: */ 0, /* currentWriteQuorum: */ 0, /* isValid: */ false, /* storePhysicalAddress: */ storePhysicalAddress, /* globalCommittedLSN: */-1, /* numberOfReadRegions: */ 0, /* itemLSN: */ -1, /* getSessionToken: */ null); } } } void startBackgroundAddressRefresh(RxDocumentServiceRequest request) { this.addressSelector.resolveAllUriAsync(request, true, true) .publishOn(Schedulers.elastic()) .subscribe( r -> { }, e -> logger.warn( "Background refresh of the addresses failed with {}", e.getMessage(), e) ); } private static int generateNextRandom(int maxValue) { return ThreadLocalRandom.current().nextInt(maxValue); } private static class ReadReplicaResult { public ReadReplicaResult(boolean retryWithForceRefresh, List<StoreResult> responses) { this.retryWithForceRefresh = retryWithForceRefresh; this.responses = responses; } public final boolean retryWithForceRefresh; public final List<StoreResult> responses; } }
/**
 * Reads from one or more replicas of a partition and converts each transport-level
 * response (or failure) into a {@link StoreResult}, optionally retrying with a
 * force-refreshed address cache when replicas have gone away.
 *
 * Not documented as thread-safe beyond what the immutable collaborators provide;
 * {@code lastReadAddress} is plain mutable state — TODO(review) confirm intended
 * single-writer usage.
 */
class StoreReader {
    private final Logger logger = LoggerFactory.getLogger(StoreReader.class);
    private final TransportClient transportClient;
    private final AddressSelector addressSelector;
    private final ISessionContainer sessionContainer;

    // Address of the most recent replica read; diagnostic only from what is visible here.
    private String lastReadAddress;

    public StoreReader(
            TransportClient transportClient,
            AddressSelector addressSelector,
            ISessionContainer sessionContainer) {
        this.transportClient = transportClient;
        this.addressSelector = addressSelector;
        this.sessionContainer = sessionContainer;
    }

    /**
     * Convenience overload of {@link #readMultipleReplicaAsync(RxDocumentServiceRequest, boolean, int, boolean, boolean, ReadMode, boolean, boolean)}
     * with {@code checkMinLSN = false} and {@code forceReadAll = false}.
     */
    public Mono<List<StoreResult>> readMultipleReplicaAsync(
            RxDocumentServiceRequest entity,
            boolean includePrimary,
            int replicaCountToRead,
            boolean requiresValidLsn,
            boolean useSessionToken,
            ReadMode readMode) {
        return readMultipleReplicaAsync(entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, false, false);
    }

    /**
     * Makes requests to multiple replicas at once and returns responses
     * @param entity              RxDocumentServiceRequest
     * @param includePrimary      flag to indicate whether to indicate primary replica in the reads
     * @param replicaCountToRead  number of replicas to read from
     * @param requiresValidLsn    flag to indicate whether a valid lsn is required to consider a response as valid
     * @param useSessionToken     flag to indicate whether to use session token
     * @param readMode            READ getMode
     * @param checkMinLSN         set minimum required session lsn
     * @param forceReadAll        reads from all available replicas to gather result from readsToRead number of replicas
     * @return ReadReplicaResult which indicates the LSN and whether Quorum was Met / Not Met etc
     */
    public Mono<List<StoreResult>> readMultipleReplicaAsync(
            RxDocumentServiceRequest entity,
            boolean includePrimary,
            int replicaCountToRead,
            boolean requiresValidLsn,
            boolean useSessionToken,
            ReadMode readMode,
            boolean checkMinLSN,
            boolean forceReadAll) {

        if (entity.requestContext.timeoutHelper.isElapsed()) {
            return Mono.error(new GoneException());
        }

        // Preserved so the caller's session token can be restored after the read completes.
        String originalSessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);

        if (entity.requestContext.cosmosDiagnostics == null) {
            entity.requestContext.cosmosDiagnostics = entity.createCosmosDiagnostics();
        }

        Mono<ReadReplicaResult> readQuorumResultObs = this.readMultipleReplicasInternalAsync(
                entity, includePrimary, replicaCountToRead, requiresValidLsn, useSessionToken, readMode, checkMinLSN, forceReadAll);

        return readQuorumResultObs.flatMap(readQuorumResult -> {
            // One local retry with a force-refreshed address cache when replicas were gone.
            if (entity.requestContext.performLocalRefreshOnGoneException &&
                    readQuorumResult.retryWithForceRefresh &&
                    !entity.requestContext.forceRefreshAddressCache) {
                if (entity.requestContext.timeoutHelper.isElapsed()) {
                    return Mono.error(new GoneException());
                }

                entity.requestContext.forceRefreshAddressCache = true;
                return this.readMultipleReplicasInternalAsync(
                        entity, includePrimary, replicaCountToRead,
                        requiresValidLsn, useSessionToken, readMode, false /*checkMinLSN*/, forceReadAll)
                        .map(r -> r.responses);
            } else {
                return Mono.just(readQuorumResult.responses);
            }
        }).flux().doAfterTerminate(() -> SessionTokenHelper.setOriginalSessionToken(entity, originalSessionToken)).single();
    }

    /**
     * Short-circuits with a ReadReplicaResult when fewer addresses than {@code replicaCountToRead}
     * were resolved; asks for a forced refresh unless one was already performed. Emits nothing
     * when there are enough replicas.
     */
    private Flux<ReadReplicaResult> earlyResultIfNotEnoughReplicas(List<Uri> replicaAddresses,
                                                                   RxDocumentServiceRequest request,
                                                                   int replicaCountToRead) {
        if (replicaAddresses.size() < replicaCountToRead) {
            if (!request.requestContext.forceRefreshAddressCache) {
                return Flux.just(new ReadReplicaResult(true /*retryWithForceRefresh*/, Collections.emptyList()));
            } else {
                return Flux.just(new ReadReplicaResult(false /*retryWithForceRefresh*/, Collections.emptyList()));
            }
        } else {
            return Flux.empty();
        }
    }

    /**
     * Converts a (response flux, replica URI) pair into StoreResults, recording contacted and
     * failed replicas in the request diagnostics. Transport failures become StoreResults rather
     * than terminal errors so quorum logic can evaluate them.
     */
    private Flux<StoreResult> toStoreResult(RxDocumentServiceRequest request,
                                            Pair<Flux<StoreResponse>, Uri> storeRespAndURI,
                                            ReadMode readMode,
                                            boolean requiresValidLsn) {

        return storeRespAndURI.getLeft()
                .flatMap(storeResponse -> {
                            try {
                                StoreResult storeResult = this.createAndRecordStoreResult(
                                        request,
                                        storeResponse,
                                        null,
                                        requiresValidLsn,
                                        readMode != ReadMode.Strong,
                                        storeRespAndURI.getRight());
                                BridgeInternal.getContactedReplicas(request.requestContext.cosmosDiagnostics).add(storeRespAndURI.getRight().getURI());
                                return Flux.just(storeResult);
                            } catch (Exception e) {
                                return Flux.error(e);
                            }
                        }
                ).onErrorResume(t -> {
                    Throwable unwrappedException = Exceptions.unwrap(t);
                    try {
                        logger.debug("Exception is thrown while doing readMany: ", unwrappedException);
                        Exception storeException = Utils.as(unwrappedException, Exception.class);
                        if (storeException == null) {
                            return Flux.error(unwrappedException);
                        }

                        StoreResult storeResult = this.createAndRecordStoreResult(
                                request,
                                null,
                                storeException,
                                requiresValidLsn,
                                readMode != ReadMode.Strong,
                                storeRespAndURI.getRight());
                        if (storeException instanceof TransportException) {
                            BridgeInternal.getFailedReplicas(request.requestContext.cosmosDiagnostics).add(storeRespAndURI.getRight().getURI());
                        }
                        return Flux.just(storeResult);
                    } catch (Exception e) {
                        return Flux.error(e);
                    }
                });
    }

    /**
     * Issues reads against a random subset (or, with forceReadAll, all) of the resolved replica
     * addresses and accumulates valid results into {@code resultCollector}. Mutates the shared
     * counters/flags passed in by readMultipleReplicasInternalAsync's repeat loop.
     */
    private Flux<List<StoreResult>> readFromReplicas(List<StoreResult> resultCollector,
                                                     List<Uri> resolveApiResults,
                                                     final AtomicInteger replicasToRead,
                                                     RxDocumentServiceRequest entity,
                                                     boolean includePrimary,
                                                     int replicaCountToRead,
                                                     boolean requiresValidLsn,
                                                     boolean useSessionToken,
                                                     ReadMode readMode,
                                                     boolean checkMinLSN,
                                                     boolean forceReadAll,
                                                     final MutableVolatile<ISessionToken> requestSessionToken,
                                                     final MutableVolatile<Boolean> hasGoneException,
                                                     boolean enforceSessionCheck,
                                                     final MutableVolatile<ReadReplicaResult> shortCircut) {
        if (entity.requestContext.timeoutHelper.isElapsed()) {
            return Flux.error(new GoneException());
        }
        List<Pair<Flux<StoreResponse>, Uri>> readStoreTasks = new ArrayList<>();

        // Start at a random replica so load is spread across the replica set.
        int uriIndex = StoreReader.generateNextRandom(resolveApiResults.size());

        while (resolveApiResults.size() > 0) {
            uriIndex = uriIndex % resolveApiResults.size();
            Uri uri = resolveApiResults.get(uriIndex);
            Pair<Mono<StoreResponse>, Uri> res;
            try {
                res = this.readFromStoreAsync(resolveApiResults.get(uriIndex), entity);
            } catch (Exception e) {
                // A synchronous failure still produces a task so it is surfaced as a StoreResult.
                res = Pair.of(Mono.error(e), uri);
            }

            readStoreTasks.add(Pair.of(res.getLeft().flux(), res.getRight()));
            resolveApiResults.remove(uriIndex);

            if (!forceReadAll && readStoreTasks.size() == replicasToRead.get()) {
                break;
            }
        }

        replicasToRead.set(readStoreTasks.size() >= replicasToRead.get() ? 0 : replicasToRead.get() - readStoreTasks.size());

        List<Flux<StoreResult>> storeResult = readStoreTasks
                .stream()
                .map(item -> toStoreResult(entity, item, readMode, requiresValidLsn))
                .collect(Collectors.toList());
        Flux<StoreResult> allStoreResults = Flux.merge(storeResult);

        return allStoreResults.collectList().onErrorResume(e -> {
            if (Exceptions.isMultiple(e)) {
                logger.info("Captured composite exception");
                List<Throwable> exceptions = Exceptions.unwrapMultiple(e);
                assert !exceptions.isEmpty();
                // Only the first captured failure is propagated.
                return Mono.error(exceptions.get(0));
            }

            return Mono.error(e);
        }).map(newStoreResults -> {
            for (StoreResult srr : newStoreResults) {

                if (srr.isValid) {

                    try {

                        if (requestSessionToken.v == null
                                || (srr.sessionToken != null && requestSessionToken.v.isValid(srr.sessionToken))
                                || (!enforceSessionCheck && !srr.isNotFoundException)) {
                            resultCollector.add(srr);
                        }

                    } catch (Exception e) {
                        // Best-effort: a failure while validating the session token is swallowed
                        // and the result is simply not collected — TODO(review) confirm this is
                        // intentional rather than a missing log statement.
                    }
                }

                hasGoneException.v = hasGoneException.v || (srr.isGoneException && !srr.isInvalidPartitionException);

                if (resultCollector.size() >= replicaCountToRead) {
                    if (hasGoneException.v && !entity.requestContext.performedBackgroundAddressRefresh) {
                        // Kick off an async address refresh so later requests see fresh replicas.
                        this.startBackgroundAddressRefresh(entity);
                        entity.requestContext.performedBackgroundAddressRefresh = true;
                    }

                    shortCircut.v = new ReadReplicaResult(false, resultCollector);
                    replicasToRead.set(0);
                    return resultCollector;
                }

                replicasToRead.set(replicaCountToRead - resultCollector.size());
            }

            return resultCollector;
        }).flux();
    }

    /**
     * Builds the final ReadReplicaResult; when fewer responses than requested arrived and a
     * Gone was observed, either throws GoneException or requests a force-refresh retry.
     */
    private ReadReplicaResult createReadReplicaResult(List<StoreResult> responseResult,
                                                      int replicaCountToRead,
                                                      int resolvedAddressCount,
                                                      boolean hasGoneException,
                                                      RxDocumentServiceRequest entity) throws CosmosException {
        if (responseResult.size() < replicaCountToRead) {
            if (logger.isDebugEnabled()) {
                logger.debug("Could not get quorum number of responses. " +
                                "ValidResponsesReceived: {} ResponsesExpected: {}, ResolvedAddressCount: {}, ResponsesString: {}",
                        responseResult.size(),
                        replicaCountToRead,
                        resolvedAddressCount,
                        String.join(";", responseResult.stream().map(r -> r.toString()).collect(Collectors.toList())));
            }

            if (hasGoneException) {
                if (!entity.requestContext.performLocalRefreshOnGoneException) {
                    // If we are not supposed to act upon GoneExceptions here, just throw them
                    throw new GoneException();
                } else if (!entity.requestContext.forceRefreshAddressCache) {
                    // If we are meant to act upon GoneExceptions, ask the caller to refresh addresses and come back here
                    return new ReadReplicaResult(true, responseResult);
                }
            }
        }

        return new ReadReplicaResult(false, responseResult);
    }

    /**
     * Makes requests to multiple replicas at once and returns responses
     * @param entity             DocumentServiceRequest
     * @param includePrimary     flag to indicate whether to indicate primary replica in the reads
     * @param replicaCountToRead number of replicas to read from
     * @param requiresValidLsn   flag to indicate whether a valid lsn is required to consider a response as valid
     * @param useSessionToken    flag to indicate whether to use session token
     * @param readMode           READ getMode
     * @param checkMinLSN        set minimum required session lsn
     * @param forceReadAll       will read from all available replicas to put together result from readsToRead number of replicas
     * @return ReadReplicaResult which indicates the LSN and whether Quorum was Met / Not Met etc
     */
    private Mono<ReadReplicaResult> readMultipleReplicasInternalAsync(RxDocumentServiceRequest entity,
                                                                      boolean includePrimary,
                                                                      int replicaCountToRead,
                                                                      boolean requiresValidLsn,
                                                                      boolean useSessionToken,
                                                                      ReadMode readMode,
                                                                      boolean checkMinLSN,
                                                                      boolean forceReadAll) {
        if (entity.requestContext.timeoutHelper.isElapsed()) {
            return Mono.error(new GoneException());
        }

        String requestedCollectionId = null;

        if (entity.forceNameCacheRefresh) {
            requestedCollectionId = entity.requestContext.resolvedCollectionRid;
        }

        Mono<List<Uri>> resolveApiResultsObs = this.addressSelector.resolveAllUriAsync(
                entity,
                includePrimary,
                entity.requestContext.forceRefreshAddressCache);

        // The collection was re-resolved under a different rid: the old rid's session tokens are stale.
        if (!StringUtils.isEmpty(requestedCollectionId) && !StringUtils.isEmpty(entity.requestContext.resolvedCollectionRid)) {
            if (!requestedCollectionId.equals(entity.requestContext.resolvedCollectionRid)) {
                this.sessionContainer.clearTokenByResourceId(requestedCollectionId);
            }
        }

        return resolveApiResultsObs.flux()
                .map(list -> Collections.synchronizedList(new ArrayList<>(list)))
                .flatMap(
                        resolveApiResults -> {
                            try {
                                MutableVolatile<ISessionToken> requestSessionToken = new MutableVolatile<>();
                                if (useSessionToken) {
                                    SessionTokenHelper.setPartitionLocalSessionToken(entity, this.sessionContainer);
                                    if (checkMinLSN) {
                                        requestSessionToken.v = entity.requestContext.sessionToken;
                                    }
                                } else {
                                    entity.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
                                }

                                Flux<ReadReplicaResult> y = earlyResultIfNotEnoughReplicas(resolveApiResults, entity, replicaCountToRead);
                                return y.switchIfEmpty(
                                        Flux.defer(() -> {

                                            List<StoreResult> storeResultList = Collections.synchronizedList(new ArrayList<>());
                                            AtomicInteger replicasToRead = new AtomicInteger(replicaCountToRead);

                                            // string clientVersion = entity.Headers[HttpConstants.HttpHeaders.Version];
                                            // enforceSessionCheck = string.IsNullOrEmpty(clientVersion) || VersionUtility.IsLaterThan(clientVersion, HttpConstants.Versions.v2016_05_30);
                                            boolean enforceSessionCheck = true;

                                            MutableVolatile<Boolean> hasGoneException = new MutableVolatile<>(false);
                                            MutableVolatile<ReadReplicaResult> shortCircuitResult = new MutableVolatile<>();

                                            // Repeat the partial read until enough results arrive or addresses run out.
                                            return Flux.defer(() ->
                                                    readFromReplicas(
                                                            storeResultList,
                                                            resolveApiResults,
                                                            replicasToRead,
                                                            entity,
                                                            includePrimary,
                                                            replicaCountToRead,
                                                            requiresValidLsn,
                                                            useSessionToken,
                                                            readMode,
                                                            checkMinLSN,
                                                            forceReadAll,
                                                            requestSessionToken,
                                                            hasGoneException,
                                                            enforceSessionCheck,
                                                            shortCircuitResult))
                                                    .repeat()
                                                    .takeUntil(x -> {
                                                        // Continue only while more replicas are wanted and addresses remain.
                                                        if (replicasToRead.get() > 0 && resolveApiResults.size() > 0) {
                                                            return false;
                                                        } else {
                                                            return true;
                                                        }
                                                    })
                                                    .thenMany(
                                                            Flux.defer(() -> {
                                                                        try {
                                                                            return Flux.just(createReadReplicaResult(storeResultList, replicaCountToRead, resolveApiResults.size(), hasGoneException.v, entity));
                                                                        } catch (Exception e) {
                                                                            return Flux.error(e);
                                                                        }
                                                                    }
                                                            ));
                                        }));
                            } catch (Exception e) {
                                return Flux.error(e);
                            }
                        }
                ).single();
    }

    /**
     * Reads from the primary replica, with one optional local force-refresh retry on Gone.
     */
    public Mono<StoreResult> readPrimaryAsync(
            RxDocumentServiceRequest entity,
            boolean requiresValidLsn,
            boolean useSessionToken) {
        if (entity.requestContext.timeoutHelper.isElapsed()) {
            return Mono.error(new GoneException());
        }

        String originalSessionToken = entity.getHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
        if (entity.requestContext.cosmosDiagnostics == null) {
            entity.requestContext.cosmosDiagnostics = entity.createCosmosDiagnostics();
        }

        return this.readPrimaryInternalAsync(
                entity, requiresValidLsn, useSessionToken).flatMap(
                readQuorumResult -> {

                    if (entity.requestContext.performLocalRefreshOnGoneException &&
                            readQuorumResult.retryWithForceRefresh &&
                            !entity.requestContext.forceRefreshAddressCache) {
                        if (entity.requestContext.timeoutHelper.isElapsed()) {
                            return Mono.error(new GoneException());
                        }

                        entity.requestContext.forceRefreshAddressCache = true;

                        return this.readPrimaryInternalAsync(entity, requiresValidLsn, useSessionToken);
                    } else {
                        return Mono.just(readQuorumResult);
                    }
                }
        ).flatMap(readQuorumResult -> {

            if (readQuorumResult.responses.size() == 0) {
                return Mono.error(new GoneException(RMResources.Gone));
            }

            return Mono.just(readQuorumResult.responses.get(0));

        }).doOnEach(arg -> {
                    try {
                        SessionTokenHelper.setOriginalSessionToken(entity, originalSessionToken);
                    } catch (Throwable throwable) {
                        logger.error("Unexpected failure in handling orig [{}]: new [{}]", arg, throwable.getMessage(), throwable);
                    }
                }
        );
    }

    /**
     * Resolves the primary URI and issues a single read; a Gone on a still-valid partition
     * signals the caller to retry with a refreshed address cache.
     */
    private Mono<ReadReplicaResult> readPrimaryInternalAsync(
            RxDocumentServiceRequest entity,
            boolean requiresValidLsn,
            boolean useSessionToken) {
        if (entity.requestContext.timeoutHelper.isElapsed()) {
            return Mono.error(new GoneException());
        }

        Mono<Uri> primaryUriObs = this.addressSelector.resolvePrimaryUriAsync(
                entity,
                entity.requestContext.forceRefreshAddressCache);

        Mono<StoreResult> storeResultObs = primaryUriObs.flatMap(
                primaryUri -> {
                    try {
                        if (useSessionToken) {
                            SessionTokenHelper.setPartitionLocalSessionToken(entity, this.sessionContainer);
                        } else {
                            // Remove whatever session token can be there in headers.
                            entity.getHeaders().remove(HttpConstants.HttpHeaders.SESSION_TOKEN);
                        }

                        Pair<Mono<StoreResponse>, Uri> storeResponseObsAndUri = this.readFromStoreAsync(primaryUri, entity);

                        return storeResponseObsAndUri.getLeft().flatMap(
                                storeResponse -> {
                                    try {
                                        StoreResult storeResult = this.createAndRecordStoreResult(
                                                entity,
                                                storeResponse != null ? storeResponse : null,
                                                null,
                                                requiresValidLsn,
                                                true,
                                                storeResponse != null ? storeResponseObsAndUri.getRight() : null);
                                        return Mono.just(storeResult);
                                    } catch (CosmosException e) {
                                        return Mono.error(e);
                                    }
                                }
                        );

                    } catch (CosmosException e) {
                        // RxJava1 doesn't allow throwing checked exception from Observable operators
                        return Mono.error(e);
                    }
                }
        ).onErrorResume(t -> {
            Throwable unwrappedException = Exceptions.unwrap(t);
            logger.debug("Exception is thrown while doing READ Primary", unwrappedException);

            Exception storeTaskException = Utils.as(unwrappedException, Exception.class);
            if (storeTaskException == null) {
                return Mono.error(unwrappedException);
            }

            try {
                StoreResult storeResult = this.createAndRecordStoreResult(
                        entity,
                        null,
                        storeTaskException,
                        requiresValidLsn,
                        true,
                        null);
                return Mono.just(storeResult);
            } catch (CosmosException e) {
                // RxJava1 doesn't allow throwing checked exception from Observable operators
                return Mono.error(e);
            }
        });

        return storeResultObs.map(storeResult -> {
            if (storeResult.isGoneException && !storeResult.isInvalidPartitionException) {
                return new ReadReplicaResult(true, Collections.emptyList());
            }

            return new ReadReplicaResult(false, Collections.singletonList(storeResult));
        });
    }

    /**
     * Issues the transport-level call for the given operation type, normalizing composite
     * continuation tokens (only the first ';'-separated part is sent to the replica).
     */
    private Pair<Mono<StoreResponse>, Uri> readFromStoreAsync(
            Uri physicalAddress,
            RxDocumentServiceRequest request) {

        if (request.requestContext.timeoutHelper.isElapsed()) {
            throw new GoneException();
        }

        //QueryRequestPerformanceActivity activity = null;
        String ifNoneMatch = request.getHeaders().get(HttpConstants.HttpHeaders.IF_NONE_MATCH);
        String continuation = null;
        String maxPageSize = null;

        this.lastReadAddress = physicalAddress.toString();

        if (request.getOperationType() == OperationType.ReadFeed ||
                request.getOperationType() == OperationType.Query) {
            continuation = request.getHeaders().get(HttpConstants.HttpHeaders.CONTINUATION);
            maxPageSize = request.getHeaders().get(HttpConstants.HttpHeaders.PAGE_SIZE);

            if (continuation != null && continuation.contains(";")) {
                String[] parts = StringUtils.split(continuation, ';');
                if (parts.length < 3) {
                    throw new BadRequestException(String.format(
                            RMResources.InvalidHeaderValue,
                            continuation,
                            HttpConstants.HttpHeaders.CONTINUATION));
                }

                continuation = parts[0];
            }

            request.setContinuation(continuation);
        }

        switch (request.getOperationType()) {
            case Read:
            case Head: {
                Mono<StoreResponse> storeResponseObs = this.transportClient.invokeResourceOperationAsync(
                        physicalAddress,
                        request);

                return Pair.of(storeResponseObs, physicalAddress);
            }

            case ReadFeed:
            case HeadFeed:
            case Query:
            case SqlQuery:
            case ExecuteJavaScript: {
                Mono<StoreResponse> storeResponseObs = StoreReader.completeActivity(this.transportClient.invokeResourceOperationAsync(
                        physicalAddress,
                        request), null);

                return Pair.of(storeResponseObs, physicalAddress);
            }

            default:
                throw new IllegalStateException(String.format("Unexpected operation setType {%s}", request.getOperationType()));
        }
    }

    // Placeholder: activity tracking is not implemented; the task is returned unchanged.
    private static Mono<StoreResponse> completeActivity(Mono<StoreResponse> task, Object activity) {
        return task;
    }

    /**
     * Creates a StoreResult and records it (plus its request charge) in the request diagnostics.
     * Recording failures are logged but never fail the read.
     */
    StoreResult createAndRecordStoreResult(
            RxDocumentServiceRequest request,
            StoreResponse storeResponse,
            Exception responseException,
            boolean requiresValidLsn,
            boolean useLocalLSNBasedHeaders,
            Uri storePhysicalAddress) {

        StoreResult storeResult = this.createStoreResult(storeResponse, responseException, requiresValidLsn, useLocalLSNBasedHeaders, storePhysicalAddress);

        try {
            BridgeInternal.recordResponse(request.requestContext.cosmosDiagnostics, request, storeResult);
            if (request.requestContext.requestChargeTracker != null) {
                request.requestContext.requestChargeTracker.addCharge(storeResult.requestCharge);
            }
        } catch (Exception e) {
            // Diagnostics recording is best-effort; the store result is still returned.
            logger.error("Unexpected failure while recording response", e);
        }

        if (responseException != null) {
            verifyCanContinueOnException(storeResult.getException());
        }

        return storeResult;
    }

    /**
     * Translates either a successful StoreResponse or a failure into a StoreResult, extracting
     * LSN/quorum/charge headers (local-LSN variants when {@code useLocalLSNBasedHeaders}).
     */
    StoreResult createStoreResult(StoreResponse storeResponse,
                                  Exception responseException,
                                  boolean requiresValidLsn,
                                  boolean useLocalLSNBasedHeaders,
                                  Uri storePhysicalAddress) {

        if (responseException == null) {
            String headerValue = null;
            long quorumAckedLSN = -1;
            int currentReplicaSetSize = -1;
            int currentWriteQuorum = -1;
            long globalCommittedLSN = -1;
            int numberOfReadRegions = -1;
            long itemLSN = -1;
            if ((headerValue = storeResponse.getHeaderValue(
                    useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN : WFConstants.BackendHeaders.QUORUM_ACKED_LSN)) != null) {
                quorumAckedLSN = Long.parseLong(headerValue);
            }

            if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE)) != null) {
                currentReplicaSetSize = Integer.parseInt(headerValue);
            }

            if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.CURRENT_WRITE_QUORUM)) != null) {
                currentWriteQuorum = Integer.parseInt(headerValue);
            }

            double requestCharge = 0;
            if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) {
                requestCharge = Double.parseDouble(headerValue);
            }

            if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS)) != null) {
                numberOfReadRegions = Integer.parseInt(headerValue);
            }

            if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN)) != null) {
                globalCommittedLSN = Long.parseLong(headerValue);
            }

            if ((headerValue = storeResponse.getHeaderValue(
                    useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.ITEM_LOCAL_LSN : WFConstants.BackendHeaders.ITEM_LSN)) != null) {
                itemLSN = Long.parseLong(headerValue);
            }

            long lsn = -1;
            if (useLocalLSNBasedHeaders) {
                if ((headerValue = storeResponse.getHeaderValue(WFConstants.BackendHeaders.LOCAL_LSN)) != null) {
                    lsn = Long.parseLong(headerValue);
                }
            } else {
                lsn = storeResponse.getLSN();
            }

            ISessionToken sessionToken = null;
            // SESSION token response header is introduced from version HttpConstants.Versions.v2018_06_18 onwards.
            if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) {
                sessionToken = SessionTokenHelper.parse(headerValue);
            }

            return new StoreResult(
                    /*   storeResponse:  */storeResponse,
                    /* exception: */ null,
                    /* partitionKeyRangeId: */ storeResponse.getPartitionKeyRangeId(),
                    /* lsn: */ lsn,
                    /* quorumAckedLsn: */ quorumAckedLSN,
                    /* getRequestCharge: */ requestCharge,
                    /* currentReplicaSetSize: */ currentReplicaSetSize,
                    /* currentWriteQuorum: */ currentWriteQuorum,
                    /* isValid: */true,
                    /* storePhysicalAddress: */ storePhysicalAddress,
                    /* globalCommittedLSN: */ globalCommittedLSN,
                    /* numberOfReadRegions: */ numberOfReadRegions,
                    /* itemLSN: */ itemLSN,
                    /* getSessionToken: */ sessionToken);
        } else {
            Throwable unwrappedResponseExceptions = Exceptions.unwrap(responseException);
            CosmosException cosmosException = Utils.as(unwrappedResponseExceptions, CosmosException.class);
            if (cosmosException != null) {
                long quorumAckedLSN = -1;
                int currentReplicaSetSize = -1;
                int currentWriteQuorum = -1;
                long globalCommittedLSN = -1;
                int numberOfReadRegions = -1;
                String headerValue = cosmosException.getResponseHeaders().get(useLocalLSNBasedHeaders ? WFConstants.BackendHeaders.QUORUM_ACKED_LOCAL_LSN : WFConstants.BackendHeaders.QUORUM_ACKED_LSN);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    quorumAckedLSN = Long.parseLong(headerValue);
                }

                headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.CURRENT_REPLICA_SET_SIZE);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    currentReplicaSetSize = Integer.parseInt(headerValue);
                }

                headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.CURRENT_WRITE_QUORUM);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    // FIX: the CURRENT_WRITE_QUORUM header was previously parsed into
                    // currentReplicaSetSize (copy/paste bug), leaving currentWriteQuorum at -1
                    // and clobbering the replica-set size read above.
                    currentWriteQuorum = Integer.parseInt(headerValue);
                }

                double requestCharge = 0;
                headerValue = cosmosException.getResponseHeaders().get(HttpConstants.HttpHeaders.REQUEST_CHARGE);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    requestCharge = Double.parseDouble(headerValue);
                }

                headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.NUMBER_OF_READ_REGIONS);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    numberOfReadRegions = Integer.parseInt(headerValue);
                }

                headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.GLOBAL_COMMITTED_LSN);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    // FIX: globalCommittedLSN is a long; Integer.parseInt would throw
                    // NumberFormatException for LSNs above Integer.MAX_VALUE. The success branch
                    // already uses Long.parseLong for this header.
                    globalCommittedLSN = Long.parseLong(headerValue);
                }

                long lsn = -1;
                if (useLocalLSNBasedHeaders) {
                    headerValue = cosmosException.getResponseHeaders().get(WFConstants.BackendHeaders.LOCAL_LSN);
                    if (!Strings.isNullOrEmpty(headerValue)) {
                        lsn = Long.parseLong(headerValue);
                    }
                } else {
                    lsn = BridgeInternal.getLSN(cosmosException);
                }

                ISessionToken sessionToken = null;
                // SESSION token response header is introduced from version HttpConstants.Versions.v2018_06_18 onwards.
                headerValue = cosmosException.getResponseHeaders().get(HttpConstants.HttpHeaders.SESSION_TOKEN);
                if (!Strings.isNullOrEmpty(headerValue)) {
                    sessionToken = SessionTokenHelper.parse(headerValue);
                }

                return new StoreResult(
                        /* storeResponse: */ (StoreResponse) null,
                        /* exception: */ cosmosException,
                        /* partitionKeyRangeId: */BridgeInternal.getPartitionKeyRangeId(cosmosException),
                        /* lsn: */ lsn,
                        /* quorumAckedLsn: */ quorumAckedLSN,
                        /* getRequestCharge: */ requestCharge,
                        /* currentReplicaSetSize: */ currentReplicaSetSize,
                        /* currentWriteQuorum: */ currentWriteQuorum,
                        /* isValid: */!requiresValidLsn
                        || ((cosmosException.getStatusCode() != HttpConstants.StatusCodes.GONE || isSubStatusCode(cosmosException, HttpConstants.SubStatusCodes.NAME_CACHE_IS_STALE))
                        && lsn >= 0),
                        // TODO: verify where exception.RequestURI is supposed to be set in .Net
                        /* storePhysicalAddress: */ storePhysicalAddress == null ? BridgeInternal.getRequestUri(cosmosException) : storePhysicalAddress,
                        /* globalCommittedLSN: */ globalCommittedLSN,
                        /* numberOfReadRegions: */ numberOfReadRegions,
                        /* itemLSN: */ -1,
                        sessionToken);
            } else {
                logger.error("Unexpected exception {} received while reading from store.", responseException.getMessage(), responseException);
                return new StoreResult(
                        /* storeResponse: */ null,
                        /* exception: */ new InternalServerErrorException(RMResources.InternalServerError),
                        /* partitionKeyRangeId: */ (String) null,
                        /* lsn: */ -1,
                        /* quorumAckedLsn: */ -1,
                        /* getRequestCharge: */ 0,
                        /* currentReplicaSetSize: */ 0,
                        /* currentWriteQuorum: */ 0,
                        /* isValid: */ false,
                        /* storePhysicalAddress: */ storePhysicalAddress,
                        /* globalCommittedLSN: */-1,
                        /* numberOfReadRegions: */ 0,
                        /* itemLSN: */ -1,
                        /* getSessionToken: */ null);
            }
        }
    }

    /**
     * Fire-and-forget refresh of all replica addresses; failures are only logged.
     */
    void startBackgroundAddressRefresh(RxDocumentServiceRequest request) {
        this.addressSelector.resolveAllUriAsync(request, true, true)
                .publishOn(Schedulers.elastic())
                .subscribe(
                        r -> {
                        },
                        e -> logger.warn(
                                "Background refresh of the addresses failed with {}", e.getMessage(), e)
                );
    }

    private static int generateNextRandom(int maxValue) {
        return ThreadLocalRandom.current().nextInt(maxValue);
    }

    // Immutable pair of (retry-with-refresh flag, collected responses).
    private static class ReadReplicaResult {
        public ReadReplicaResult(boolean retryWithForceRefresh, List<StoreResult> responses) {
            this.retryWithForceRefresh = retryWithForceRefresh;
            this.responses = responses;
        }

        public final boolean retryWithForceRefresh;
        public final List<StoreResult> responses;
    }
}
TODO(review): this method is currently package-private; it should be made public instead (see the related comment above).
/**
 * Stores a metadata-caches snapshot on this builder (assigned to the builder's
 * {@code state} field) and returns the builder for fluent chaining.
 * Package-private; presumably used to warm a new client's metadata caches from a
 * previously captured snapshot — TODO(review) confirm against callers.
 *
 * @param metadataCachesSnapshot the snapshot to associate with the client being built
 * @return the current CosmosClientBuilder
 */
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) {
    this.state = metadataCachesSnapshot;
    return this;
}
}
/**
 * Records the given metadata-caches snapshot on this builder and returns the
 * builder so calls can be chained fluently.
 *
 * @param snapshot snapshot to keep in the builder's {@code state} field
 * @return this builder
 */
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot snapshot) {
    this.state = snapshot;
    return this;
}
/**
 * Builder for {@link CosmosAsyncClient} and {@link CosmosClient} instances.
 * <p>
 * The builder is initialized in DIRECT connection mode with an empty user-agent suffix and
 * default throttling retry options. Exactly one authentication mechanism (key, resource token,
 * permission list, {@link AzureKeyCredential} or {@link TokenCredential}) must be configured
 * before calling {@link #buildClient()} or {@link #buildAsyncClient()}; setting one mechanism
 * clears the others.
 */
class CosmosClientBuilder {
    private Configs configs = new Configs();
    private String serviceEndpoint;
    // Holds either a master key or a resource token; which one it is depends on the setter used.
    private String keyOrResourceToken;
    private CosmosClientMetadataCachesSnapshot state;
    private TokenCredential tokenCredential;
    private ConnectionPolicy connectionPolicy;
    private GatewayConnectionConfig gatewayConnectionConfig;
    private DirectConnectionConfig directConnectionConfig;
    private ConsistencyLevel desiredConsistencyLevel;
    private List<CosmosPermissionProperties> permissions;
    private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
    private AzureKeyCredential credential;
    private boolean sessionCapturingOverrideEnabled;
    private boolean connectionSharingAcrossClientsEnabled;
    private boolean contentResponseOnWriteEnabled;
    private String userAgentSuffix;
    private ThrottlingRetryOptions throttlingRetryOptions;
    private List<String> preferredRegions;
    private boolean endpointDiscoveryEnabled = true;
    private boolean multipleWriteRegionsEnabled = true;
    private boolean readRequestsFallbackEnabled = true;

    /**
     * Instantiates a new Cosmos client builder with a default DIRECT-mode connection policy,
     * an empty user-agent suffix and default throttling retry options.
     */
    public CosmosClientBuilder() {
        this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        this.userAgentSuffix = "";
        this.throttlingRetryOptions = new ThrottlingRetryOptions();
    }

    /**
     * Gets the metadata caches snapshot previously stored on this builder, or {@code null}.
     *
     * @return the metadata caches snapshot
     */
    CosmosClientMetadataCachesSnapshot metadataCaches() {
        return this.state;
    }

    /**
     * Overrides session capturing for consistency levels other than session consistency.
     * Session capturing is enabled by default for session consistency; for other consistency
     * levels it is only needed when occasionally sending requests with session consistency
     * while the client itself is not configured for session. Enabling this override when the
     * client is already in session mode has no effect.
     *
     * @param sessionCapturingOverrideEnabled session capturing override
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        return this;
    }

    /**
     * Indicates if session capturing is enabled for non-session consistency modes.
     * The default is false.
     *
     * @return the session capturing override
     */
    boolean isSessionCapturingOverrideEnabled() {
        return this.sessionCapturingOverrideEnabled;
    }

    /**
     * Enables connection sharing across multiple Cosmos clients. The default is false.
     * <p>
     * When multiple Cosmos client instances in the same JVM interact with multiple Cosmos
     * accounts, enabling this allows connection sharing in Direct mode between the instances
     * where possible. Note that the connection configuration (e.g. socket timeout, idle
     * timeout) of the first instantiated client is used for all other client instances.
     *
     * @param connectionSharingAcrossClientsEnabled connection sharing
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        return this;
    }

    /**
     * Indicates whether connection sharing across clients is enabled. The default is false.
     *
     * @return the connection sharing across multiple clients
     */
    boolean isConnectionSharingAcrossClientsEnabled() {
        return this.connectionSharingAcrossClientsEnabled;
    }

    /**
     * Gets the token resolver.
     *
     * @return the token resolver
     */
    CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
        return cosmosAuthorizationTokenResolver;
    }

    /**
     * Sets the token resolver and clears all other authentication mechanisms.
     *
     * @param cosmosAuthorizationTokenResolver the token resolver
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder authorizationTokenResolver(
        CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
        this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
            "'cosmosAuthorizationTokenResolver' cannot be null.");
        this.keyOrResourceToken = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets the Azure Cosmos DB endpoint the SDK will connect to.
     *
     * @return the endpoint
     */
    String getEndpoint() {
        return serviceEndpoint;
    }

    /**
     * Sets the Azure Cosmos DB endpoint the SDK will connect to.
     *
     * @param endpoint the service endpoint
     * @return current Builder
     */
    public CosmosClientBuilder endpoint(String endpoint) {
        this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
        return this;
    }

    /**
     * Gets either a master or readonly key used to perform authentication
     * for accessing resource.
     *
     * @return the key
     */
    String getKey() {
        return keyOrResourceToken;
    }

    /**
     * Sets either a master or readonly key used to perform authentication for accessing
     * resource, and clears all other authentication mechanisms.
     *
     * @param key master or readonly key
     * @return current Builder.
     */
    public CosmosClientBuilder key(String key) {
        this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets a resource token used to perform authentication
     * for accessing resource.
     *
     * @return the resourceToken
     */
    String getResourceToken() {
        return keyOrResourceToken;
    }

    /**
     * Sets a resource token used to perform authentication for accessing resource, and clears
     * all other authentication mechanisms.
     *
     * @param resourceToken resourceToken for authentication
     * @return current Builder.
     */
    public CosmosClientBuilder resourceToken(String resourceToken) {
        this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets a token credential instance used to perform authentication
     * for accessing resource.
     *
     * @return the token credential.
     */
    TokenCredential getTokenCredential() {
        return tokenCredential;
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service,
     * and clears all other authentication mechanisms.
     *
     * @param credential {@link TokenCredential}.
     * @return the updated CosmosClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public CosmosClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        return this;
    }

    /**
     * Gets the permission list, which contains the
     * resource tokens needed to access resources.
     *
     * @return the permission list
     */
    List<CosmosPermissionProperties> getPermissions() {
        return permissions;
    }

    /**
     * Sets the permission list, which contains the resource tokens needed to access resources,
     * and clears all other authentication mechanisms.
     *
     * @param permissions Permission list for authentication.
     * @return current Builder.
     */
    public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
        this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets the {@link ConsistencyLevel} to be used.
     *
     * @return the consistency level
     */
    ConsistencyLevel getConsistencyLevel() {
        return this.desiredConsistencyLevel;
    }

    /**
     * Sets the {@link ConsistencyLevel} to be used.
     *
     * @param desiredConsistencyLevel {@link ConsistencyLevel}
     * @return current Builder
     */
    public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
        this.desiredConsistencyLevel = desiredConsistencyLevel;
        return this;
    }

    /**
     * Gets the {@link ConnectionPolicy} to be used.
     *
     * @return the connection policy
     */
    ConnectionPolicy getConnectionPolicy() {
        return connectionPolicy;
    }

    /**
     * Gets the {@link AzureKeyCredential} to be used.
     *
     * @return {@link AzureKeyCredential}
     */
    AzureKeyCredential getCredential() {
        return credential;
    }

    /**
     * Sets the {@link AzureKeyCredential} to be used, and clears all other authentication
     * mechanisms.
     *
     * @param credential {@link AzureKeyCredential}
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder credential(AzureKeyCredential credential) {
        this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets the boolean which indicates whether to only return the headers and status code in
     * Cosmos DB response in case of Create, Update and Delete operations on CosmosItem.
     * <p>
     * If set to false (the default), the service doesn't return the payload in the response,
     * reducing networking and CPU load by not sending the payload back over the network and
     * serializing it on the client.
     *
     * @return a boolean indicating whether payload will be included in the response or not
     */
    boolean isContentResponseOnWriteEnabled() {
        return contentResponseOnWriteEnabled;
    }

    /**
     * Sets the boolean to only return the headers and status code in Cosmos DB response
     * in case of Create, Update and Delete operations on CosmosItem.
     * <p>
     * If set to false (the default), the service doesn't return the payload in the response.
     * This feature does not impact RU usage for read or write operations.
     *
     * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be
     * included in the response or not
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        return this;
    }

    /**
     * Sets the default GATEWAY connection configuration to be used.
     *
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder gatewayMode() {
        this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        return this;
    }

    /**
     * Sets the GATEWAY connection configuration to be used.
     *
     * @param gatewayConnectionConfig gateway connection configuration
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
        this.gatewayConnectionConfig = gatewayConnectionConfig;
        return this;
    }

    /**
     * Sets the default DIRECT connection configuration to be used.
     * By default, the builder is initialized with directMode().
     *
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder directMode() {
        this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
        return this;
    }

    /**
     * Sets the DIRECT connection configuration to be used.
     * By default, the builder is initialized with directMode().
     *
     * @param directConnectionConfig direct connection configuration
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
        this.directConnectionConfig = directConnectionConfig;
        return this;
    }

    /**
     * Sets the DIRECT connection configuration to be used along with a gateway configuration
     * for the metadata operations that go through the gateway client even in direct mode.
     * Setting the gateway connection config here does not affect the connection mode,
     * which remains Direct in this case.
     *
     * @param directConnectionConfig direct connection configuration to be used
     * @param gatewayConnectionConfig gateway connection configuration to be used
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig,
                                          GatewayConnectionConfig gatewayConnectionConfig) {
        this.directConnectionConfig = directConnectionConfig;
        this.gatewayConnectionConfig = gatewayConnectionConfig;
        return this;
    }

    /**
     * Sets the value of the user-agent suffix.
     *
     * @param userAgentSuffix The value to be appended to the user-agent header, this is
     * used for monitoring purposes.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
        this.userAgentSuffix = userAgentSuffix;
        return this;
    }

    /**
     * Sets the retry policy options associated with the DocumentClient instance.
     * <p>
     * Properties in the RetryOptions class allow the application to customize the built-in
     * retry policies. This property is optional; when it's not set, the SDK uses the default
     * values for configuring the retry policies.
     *
     * @param throttlingRetryOptions the RetryOptions instance.
     * @return current CosmosClientBuilder
     * @throws IllegalArgumentException thrown if an error occurs
     */
    public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
        this.throttlingRetryOptions = throttlingRetryOptions;
        return this;
    }

    /**
     * Sets the preferred regions for geo-replicated database accounts. For example,
     * "East US" as the preferred region.
     * <p>
     * When endpoint discovery is enabled and this list is non-empty, the SDK will prefer to
     * use the regions in the order they are specified to perform operations. If endpoint
     * discovery is disabled, this property is ignored.
     *
     * @param preferredRegions the list of preferred regions.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
        this.preferredRegions = preferredRegions;
        return this;
    }

    /**
     * Sets the flag to enable endpoint discovery for geo-replicated database accounts.
     * <p>
     * When enabled, the SDK will automatically discover the current write and read regions
     * to ensure requests are sent to the correct region based on the capability of the
     * region and the user's preference. The default value is true.
     *
     * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
        this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
        return this;
    }

    /**
     * Sets the flag to enable writes on any region for geo-replicated database accounts in
     * the Azure Cosmos DB service.
     * <p>
     * When true, the SDK will direct write operations to available writable regions of the
     * geo-replicated database account, ordered by the preferred-regions property. Setting
     * this to true has no effect until EnableMultipleWriteRegions in DatabaseAccount is also
     * set to true. The default value is true.
     *
     * @param multipleWriteRegionsEnabled flag to enable writes on any region for
     * geo-replicated database accounts.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
        this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
        return this;
    }

    /**
     * Sets whether to allow reads to go to multiple regions configured on an account of the
     * Azure Cosmos DB service.
     * <p>
     * The default value is true. If this property is not set, the default is true for all
     * consistency levels other than Bounded Staleness (whose default is false), provided the
     * account has more than one region.
     *
     * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions
     * configured on an account of the Azure Cosmos DB service.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
        this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
        return this;
    }

    /**
     * Gets the GATEWAY connection configuration to be used.
     *
     * @return gateway connection config
     */
    GatewayConnectionConfig getGatewayConnectionConfig() {
        return gatewayConnectionConfig;
    }

    /**
     * Gets the DIRECT connection configuration to be used.
     *
     * @return direct connection config
     */
    DirectConnectionConfig getDirectConnectionConfig() {
        return directConnectionConfig;
    }

    /**
     * Gets the value of user-agent suffix.
     *
     * @return the value of user-agent suffix.
     */
    String getUserAgentSuffix() {
        return userAgentSuffix;
    }

    /**
     * Gets the retry policy options associated with the DocumentClient instance.
     *
     * @return the RetryOptions instance.
     */
    ThrottlingRetryOptions getThrottlingRetryOptions() {
        return throttlingRetryOptions;
    }

    /**
     * Gets the preferred regions for geo-replicated database accounts.
     *
     * @return the list of preferred regions; never null (empty when unset).
     */
    List<String> getPreferredRegions() {
        return preferredRegions != null ? preferredRegions : Collections.emptyList();
    }

    /**
     * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
     *
     * @return whether endpoint discovery is enabled.
     */
    boolean isEndpointDiscoveryEnabled() {
        return endpointDiscoveryEnabled;
    }

    /**
     * Gets the flag to enable writes on any region for geo-replicated database accounts in
     * the Azure Cosmos DB service. The default value is true.
     *
     * @return flag to enable writes on any region for geo-replicated database accounts.
     */
    boolean isMultipleWriteRegionsEnabled() {
        return multipleWriteRegionsEnabled;
    }

    /**
     * Gets whether to allow reads to go to multiple regions configured on an account of the
     * Azure Cosmos DB service. The default value is true.
     *
     * @return flag to allow reads to go to multiple regions configured on an account of the
     * Azure Cosmos DB service.
     */
    boolean isReadRequestsFallbackEnabled() {
        return readRequestsFallbackEnabled;
    }

    /**
     * Builds a cosmos async client with the provided properties.
     *
     * @return CosmosAsyncClient
     */
    public CosmosAsyncClient buildAsyncClient() {
        validateConfig();
        buildConnectionPolicy();
        return new CosmosAsyncClient(this);
    }

    /**
     * Builds a cosmos sync client with the provided properties.
     *
     * @return CosmosClient
     */
    public CosmosClient buildClient() {
        validateConfig();
        buildConnectionPolicy();
        return new CosmosClient(this);
    }

    // Materializes the ConnectionPolicy from the configured direct/gateway configs plus the
    // region/retry/user-agent settings. Direct config wins when both are set; the gateway
    // config then only contributes the gateway-routed settings (pool size, timeouts, proxy).
    private void buildConnectionPolicy() {
        if (this.directConnectionConfig != null) {
            this.connectionPolicy = new ConnectionPolicy(directConnectionConfig);
            if (this.gatewayConnectionConfig != null) {
                this.connectionPolicy.setMaxConnectionPoolSize(this.gatewayConnectionConfig.getMaxConnectionPoolSize());
                this.connectionPolicy.setRequestTimeout(this.gatewayConnectionConfig.getRequestTimeout());
                this.connectionPolicy.setIdleHttpConnectionTimeout(this.gatewayConnectionConfig.getIdleConnectionTimeout());
                this.connectionPolicy.setProxy(this.gatewayConnectionConfig.getProxy());
            }
        } else if (gatewayConnectionConfig != null) {
            this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
        }
        this.connectionPolicy.setPreferredRegions(this.preferredRegions);
        this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
        this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
        this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
        this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
        this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    }

    // Fails fast with IllegalArgumentException when the builder is misconfigured.
    private void validateConfig() {
        // Check for a missing endpoint BEFORE parsing it: previously 'new URI(null)' was
        // reached first, surfacing a NullPointerException instead of the intended
        // IllegalArgumentException below.
        ifThrowIllegalArgException(this.serviceEndpoint == null,
            "cannot buildAsyncClient client without service endpoint");

        URI uri;
        try {
            uri = new URI(serviceEndpoint);
        } catch (URISyntaxException e) {
            throw new IllegalArgumentException("invalid serviceEndpoint", e);
        }

        if (preferredRegions != null) {
            // Validate every preferred region eagerly so misconfiguration fails at build time.
            preferredRegions.forEach(preferredRegion -> {
                Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null,
                    "preferredRegion can't be empty");
                String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
            });
        }

        ifThrowIllegalArgException(
            this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
                && this.credential == null && this.tokenCredential == null,
            "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
                + "azure key credential");
        ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
            "cannot buildAsyncClient client without key credential");
    }

    Configs configs() {
        return configs;
    }

    /**
     * Configs
     *
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder configs(Configs configs) {
        this.configs = configs;
        return this;
    }

    private void ifThrowIllegalArgException(boolean value, String error) {
        if (value) {
            throw new IllegalArgumentException(error);
        }
    }
}
class CosmosClientBuilder { private Configs configs = new Configs(); private String serviceEndpoint; private String keyOrResourceToken; private CosmosClientMetadataCachesSnapshot state; private TokenCredential tokenCredential; private ConnectionPolicy connectionPolicy; private GatewayConnectionConfig gatewayConnectionConfig; private DirectConnectionConfig directConnectionConfig; private ConsistencyLevel desiredConsistencyLevel; private List<CosmosPermissionProperties> permissions; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; private AzureKeyCredential credential; private boolean sessionCapturingOverrideEnabled; private boolean connectionSharingAcrossClientsEnabled; private boolean contentResponseOnWriteEnabled; private String userAgentSuffix; private ThrottlingRetryOptions throttlingRetryOptions; private List<String> preferredRegions; private boolean endpointDiscoveryEnabled = true; private boolean multipleWriteRegionsEnabled = true; private boolean readRequestsFallbackEnabled = true; /** * Instantiates a new Cosmos client builder. */ public CosmosClientBuilder() { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); this.userAgentSuffix = ""; this.throttlingRetryOptions = new ThrottlingRetryOptions(); } CosmosClientMetadataCachesSnapshot metadataCaches() { return this.state; } /** * Session capturing is enabled by default for {@link ConsistencyLevel * For other consistency levels, it is not needed, unless if you need occasionally send requests with Session * Consistency while the client is not configured in session. * <p> * enabling Session capturing for Session mode has no effect. 
* @param sessionCapturingOverrideEnabled session capturing override * @return current cosmosClientBuilder */ public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) { this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; return this; } /** * Indicates if Session capturing is enabled for non Session modes. * The default is false. * * @return the session capturing override */ boolean isSessionCapturingOverrideEnabled() { return this.sessionCapturingOverrideEnabled; } /** * Enables connections sharing across multiple Cosmos Clients. The default is false. * * * <pre> * {@code * CosmosAsyncClient client1 = new CosmosClientBuilder() * .endpoint(serviceEndpoint1) * .key(key1) * .consistencyLevel(ConsistencyLevel.SESSION) * .connectionSharingAcrossClientsEnabled(true) * .buildAsyncClient(); * * CosmosAsyncClient client2 = new CosmosClientBuilder() * .endpoint(serviceEndpoint2) * .key(key2) * .consistencyLevel(ConsistencyLevel.SESSION) * .connectionSharingAcrossClientsEnabled(true) * .buildAsyncClient(); * * * } * </pre> * * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts, * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client. * * Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout * config) of the first instantiated client will be used for all other client instances. * * @param connectionSharingAcrossClientsEnabled connection sharing * @return current cosmosClientBuilder */ public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; return this; } /** * Indicates whether connection sharing is enabled. The default is false. 
* * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts, * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client. * * @return the connection sharing across multiple clients */ boolean isConnectionSharingAcrossClientsEnabled() { return this.connectionSharingAcrossClientsEnabled; } /** * Gets the token resolver * * @return the token resolver */ CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() { return cosmosAuthorizationTokenResolver; } /** * Sets the token resolver * * @param cosmosAuthorizationTokenResolver the token resolver * @return current cosmosClientBuilder */ CosmosClientBuilder authorizationTokenResolver( CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) { this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver, "'cosmosAuthorizationTokenResolver' cannot be null."); this.keyOrResourceToken = null; this.credential = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets the Azure Cosmos DB endpoint the SDK will connect to * * @return the endpoint */ String getEndpoint() { return serviceEndpoint; } /** * Sets the Azure Cosmos DB endpoint the SDK will connect to * * @param endpoint the service endpoint * @return current Builder */ public CosmosClientBuilder endpoint(String endpoint) { this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return this; } /** * Gets either a master or readonly key used to perform authentication * for accessing resource. * * @return the key */ String getKey() { return keyOrResourceToken; } /** * Sets either a master or readonly key used to perform authentication * for accessing resource. * * @param key master or readonly key * @return current Builder. 
*/ public CosmosClientBuilder key(String key) { this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null."); this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets a resource token used to perform authentication * for accessing resource. * * @return the resourceToken */ String getResourceToken() { return keyOrResourceToken; } /** * Sets a resource token used to perform authentication * for accessing resource. * * @param resourceToken resourceToken for authentication * @return current Builder. */ public CosmosClientBuilder resourceToken(String resourceToken) { this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null."); this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets a token credential instance used to perform authentication * for accessing resource. * * @return the token credential. */ TokenCredential getTokenCredential() { return tokenCredential; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential {@link TokenCredential}. * @return the updated CosmosClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public CosmosClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.keyOrResourceToken = null; this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.permissions = null; return this; } /** * Gets the permission list, which contains the * resource tokens needed to access resources. * * @return the permission list */ List<CosmosPermissionProperties> getPermissions() { return permissions; } /** * Sets the permission list, which contains the * resource tokens needed to access resources. 
* * @param permissions Permission list for authentication. * @return current Builder. */ public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) { this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null."); this.keyOrResourceToken = null; this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.tokenCredential = null; return this; } /** * Gets the {@link ConsistencyLevel} to be used * * By default, {@link ConsistencyLevel * * @return the consistency level */ ConsistencyLevel getConsistencyLevel() { return this.desiredConsistencyLevel; } /** * Sets the {@link ConsistencyLevel} to be used * * By default, {@link ConsistencyLevel * * @param desiredConsistencyLevel {@link ConsistencyLevel} * @return current Builder */ public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } /** * Gets the (@link ConnectionPolicy) to be used * * @return the connection policy */ ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } /** * Gets the {@link AzureKeyCredential} to be used * * @return {@link AzureKeyCredential} */ AzureKeyCredential getCredential() { return credential; } /** * Sets the {@link AzureKeyCredential} to be used * * @param credential {@link AzureKeyCredential} * @return current cosmosClientBuilder */ public CosmosClientBuilder credential(AzureKeyCredential credential) { this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null."); this.keyOrResourceToken = null; this.cosmosAuthorizationTokenResolver = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response * in case of Create, Update and Delete operations on CosmosItem. * * If set to false (which is by default), service doesn't return payload in the response. 
It reduces networking * and CPU load by not sending the payload back over the network and serializing it * on the client. * * By-default, this is false. * * @return a boolean indicating whether payload will be included in the response or not */ boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } /** * Sets the boolean to only return the headers and status code in Cosmos DB response * in case of Create, Update and Delete operations on CosmosItem. * * If set to false (which is by default), service doesn't return payload in the response. It reduces networking * and CPU load by not sending the payload back over the network and serializing it on the client. * * This feature does not impact RU usage for read or write operations. * * By-default, this is false. * * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not * @return current cosmosClientBuilder */ public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) { this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; return this; } /** * Sets the default GATEWAY connection configuration to be used. * * @return current CosmosClientBuilder */ public CosmosClientBuilder gatewayMode() { this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig(); return this; } /** * Sets the GATEWAY connection configuration to be used. * * @param gatewayConnectionConfig gateway connection configuration * @return current CosmosClientBuilder */ public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) { this.gatewayConnectionConfig = gatewayConnectionConfig; return this; } /** * Sets the default DIRECT connection configuration to be used. 
* * By default, the builder is initialized with directMode() * * @return current CosmosClientBuilder */ public CosmosClientBuilder directMode() { this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig(); return this; } /** * Sets the DIRECT connection configuration to be used. * * By default, the builder is initialized with directMode() * * @param directConnectionConfig direct connection configuration * @return current CosmosClientBuilder */ public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) { this.directConnectionConfig = directConnectionConfig; return this; } /** * Sets the DIRECT connection configuration to be used. * gatewayConnectionConfig - represents basic configuration to be used for gateway client. * * Even in direct connection mode, some of the meta data operations go through gateway client, * * Setting gateway connection config in this API doesn't affect the connection mode, * which will be Direct in this case. * * @param directConnectionConfig direct connection configuration to be used * @param gatewayConnectionConfig gateway connection configuration to be used * @return current CosmosClientBuilder */ public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) { this.directConnectionConfig = directConnectionConfig; this.gatewayConnectionConfig = gatewayConnectionConfig; return this; } /** * sets the value of the user-agent suffix. * * @param userAgentSuffix The value to be appended to the user-agent header, this is * used for monitoring purposes. * * @return current CosmosClientBuilder */ public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) { this.userAgentSuffix = userAgentSuffix; return this; } /** * Sets the retry policy options associated with the DocumentClient instance. * <p> * Properties in the RetryOptions class allow application to customize the built-in * retry policies. This property is optional. 
When it's not set, the SDK uses the * default values for configuring the retry policies. See RetryOptions class for * more details. * * @param throttlingRetryOptions the RetryOptions instance. * @return current CosmosClientBuilder * @throws IllegalArgumentException thrown if an error occurs */ public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) { this.throttlingRetryOptions = throttlingRetryOptions; return this; } /** * Sets the preferred regions for geo-replicated database accounts. For example, * "East US" as the preferred region. * <p> * When EnableEndpointDiscovery is true and PreferredRegions is non-empty, * the SDK will prefer to use the regions in the container in the order * they are specified to perform operations. * <p> * If EnableEndpointDiscovery is set to false, this property is ignored. * * @param preferredRegions the list of preferred regions. * @return current CosmosClientBuilder */ public CosmosClientBuilder preferredRegions(List<String> preferredRegions) { this.preferredRegions = preferredRegions; return this; } /** * Sets the flag to enable endpoint discovery for geo-replicated database accounts. * <p> * When EnableEndpointDiscovery is true, the SDK will automatically discover the * current write and read regions to ensure requests are sent to the correct region * based on the capability of the region and the user's preference. * <p> * The default value for this property is true indicating endpoint discovery is enabled. * * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled. * @return current CosmosClientBuilder */ public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) { this.endpointDiscoveryEnabled = endpointDiscoveryEnabled; return this; } /** * Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure * Cosmos DB service. 
* <p> * When the value of this property is true, the SDK will direct write operations to * available writable regions of geo-replicated database account. Writable regions * are ordered by PreferredRegions property. Setting the property value * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount * is also set to true. * <p> * DEFAULT value is true indicating that writes are directed to * available writable regions of geo-replicated database account. * * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated * database accounts. * @return current CosmosClientBuilder */ public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) { this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled; return this; } /** * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. * <p> * DEFAULT value is true. * <p> * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness, * The default is false for Bounded Staleness. * 1. {@link * 2. the Azure Cosmos DB account has more than one region * * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of * Azure Cosmos DB service. * @return current CosmosClientBuilder */ public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) { this.readRequestsFallbackEnabled = readRequestsFallbackEnabled; return this; } /** * Gets the GATEWAY connection configuration to be used. * * @return gateway connection config */ GatewayConnectionConfig getGatewayConnectionConfig() { return gatewayConnectionConfig; } /** * Gets the DIRECT connection configuration to be used. * * @return direct connection config */ DirectConnectionConfig getDirectConnectionConfig() { return directConnectionConfig; } /** * Gets the value of user-agent suffix. * * @return the value of user-agent suffix. 
*/ String getUserAgentSuffix() { return userAgentSuffix; } /** * Gets the retry policy options associated with the DocumentClient instance. * * @return the RetryOptions instance. */ ThrottlingRetryOptions getThrottlingRetryOptions() { return throttlingRetryOptions; } /** * Gets the preferred regions for geo-replicated database accounts * * @return the list of preferred region. */ List<String> getPreferredRegions() { return preferredRegions != null ? preferredRegions : Collections.emptyList(); } /** * Gets the flag to enable endpoint discovery for geo-replicated database accounts. * * @return whether endpoint discovery is enabled. */ boolean isEndpointDiscoveryEnabled() { return endpointDiscoveryEnabled; } /** * Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure * Cosmos DB service. * <p> * When the value of this property is true, the SDK will direct write operations to * available writable regions of geo-replicated database account. Writable regions * are ordered by PreferredRegions property. Setting the property value * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount * is also set to true. * <p> * DEFAULT value is true indicating that writes are directed to * available writable regions of geo-replicated database account. * * @return flag to enable writes on any regions for geo-replicated database accounts. */ boolean isMultipleWriteRegionsEnabled() { return multipleWriteRegionsEnabled; } /** * Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. * <p> * DEFAULT value is true. * <p> * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness, * The default is false for Bounded Staleness. * 1. {@link * 2. the Azure Cosmos DB account has more than one region * * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. 
*/ boolean isReadRequestsFallbackEnabled() { return readRequestsFallbackEnabled; } /** * Builds a cosmos async client with the provided properties * * @return CosmosAsyncClient */ public CosmosAsyncClient buildAsyncClient() { validateConfig(); buildConnectionPolicy(); return new CosmosAsyncClient(this); } /** * Builds a cosmos sync client with the provided properties * * @return CosmosClient */ public CosmosClient buildClient() { validateConfig(); buildConnectionPolicy(); return new CosmosClient(this); } private void buildConnectionPolicy() { if (this.directConnectionConfig != null) { this.connectionPolicy = new ConnectionPolicy(directConnectionConfig); if (this.gatewayConnectionConfig != null) { this.connectionPolicy.setMaxConnectionPoolSize(this.gatewayConnectionConfig.getMaxConnectionPoolSize()); this.connectionPolicy.setRequestTimeout(this.gatewayConnectionConfig.getRequestTimeout()); this.connectionPolicy.setIdleHttpConnectionTimeout(this.gatewayConnectionConfig.getIdleConnectionTimeout()); this.connectionPolicy.setProxy(this.gatewayConnectionConfig.getProxy()); } } else if (gatewayConnectionConfig != null) { this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig); } this.connectionPolicy.setPreferredRegions(this.preferredRegions); this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix); this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions); this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled); this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled); this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled); } private void validateConfig() { URI uri; try { uri = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException("invalid serviceEndpoint", e); } if (preferredRegions != null) { preferredRegions.stream().forEach( preferredRegion -> { 
Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty"); String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", ""); LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion); } ); } ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint"); ifThrowIllegalArgException( this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty()) && this.credential == null && this.tokenCredential == null, "cannot buildAsyncClient client without any one of key, resource token, permissions, and " + "azure key credential"); ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()), "cannot buildAsyncClient client without key credential"); } Configs configs() { return configs; } /** * Configs * * @return current cosmosClientBuilder */ CosmosClientBuilder configs(Configs configs) { this.configs = configs; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } static { CosmosClientBuilderHelper.setCosmosClientBuilderAccessor( new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() { @Override public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder, CosmosClientMetadataCachesSnapshot metadataCache) { builder.metadataCaches(metadataCache); } @Override public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) { return builder.metadataCaches(); } }); } }
Please see this commit: https://github.com/Azure/azure-sdk-for-java/pull/18184/commits/2f31869e9c5d84b230f1664070455ef4a20f81f2 — the API is now internal, and the bridge-internal module has been removed. This is a pattern we can follow elsewhere too.
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) { this.state = metadataCachesSnapshot; return this; }
}
CosmosClientBuilder metadataCaches(CosmosClientMetadataCachesSnapshot metadataCachesSnapshot) { this.state = metadataCachesSnapshot; return this; }
class CosmosClientBuilder {
    private Configs configs = new Configs();
    private String serviceEndpoint;
    // Holds either a master key or a resource token; the key(...)/resourceToken(...) setters share it.
    private String keyOrResourceToken;
    private CosmosClientMetadataCachesSnapshot state;
    private TokenCredential tokenCredential;
    private ConnectionPolicy connectionPolicy;
    private GatewayConnectionConfig gatewayConnectionConfig;
    private DirectConnectionConfig directConnectionConfig;
    private ConsistencyLevel desiredConsistencyLevel;
    private List<CosmosPermissionProperties> permissions;
    private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver;
    private AzureKeyCredential credential;
    private boolean sessionCapturingOverrideEnabled;
    private boolean connectionSharingAcrossClientsEnabled;
    private boolean contentResponseOnWriteEnabled;
    private String userAgentSuffix;
    private ThrottlingRetryOptions throttlingRetryOptions;
    private List<String> preferredRegions;
    private boolean endpointDiscoveryEnabled = true;
    private boolean multipleWriteRegionsEnabled = true;
    private boolean readRequestsFallbackEnabled = true;

    /**
     * Instantiates a new Cosmos client builder, defaulting to DIRECT connection mode.
     */
    public CosmosClientBuilder() {
        this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig());
        this.userAgentSuffix = "";
        this.throttlingRetryOptions = new ThrottlingRetryOptions();
    }

    /**
     * Gets the metadata caches snapshot previously supplied via the internal accessor.
     *
     * @return the snapshot, or null if none was set
     */
    CosmosClientMetadataCachesSnapshot metadataCaches() {
        return this.state;
    }

    /**
     * Session capturing is enabled by default for {@link ConsistencyLevel#SESSION}.
     * For other consistency levels it is not needed, unless you occasionally send requests with
     * Session consistency while the client is not configured in Session mode.
     * <p>
     * Enabling session capturing for Session mode has no effect.
     *
     * @param sessionCapturingOverrideEnabled session capturing override
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) {
        this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled;
        return this;
    }

    /**
     * Indicates if session capturing is enabled for non-Session modes. The default is false.
     *
     * @return the session capturing override
     */
    boolean isSessionCapturingOverrideEnabled() {
        return this.sessionCapturingOverrideEnabled;
    }

    /**
     * Enables connection sharing across multiple Cosmos clients. The default is false.
     * <p>
     * When multiple Cosmos client instances in the same JVM interact with multiple Cosmos
     * accounts, enabling this allows connection sharing in Direct mode when possible.
     * <p>
     * Note: the connection configuration (e.g. socket timeout, idle timeout) of the first
     * instantiated client is used for all other client instances.
     *
     * @param connectionSharingAcrossClientsEnabled connection sharing
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) {
        this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled;
        return this;
    }

    /**
     * Indicates whether connection sharing across clients is enabled. The default is false.
     *
     * @return the connection sharing across multiple clients
     */
    boolean isConnectionSharingAcrossClientsEnabled() {
        return this.connectionSharingAcrossClientsEnabled;
    }

    /**
     * Gets the token resolver.
     *
     * @return the token resolver
     */
    CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() {
        return cosmosAuthorizationTokenResolver;
    }

    /**
     * Sets the token resolver. Clears all other credential forms — the credential setters are
     * mutually exclusive, so only the most recently configured mechanism remains active.
     *
     * @param cosmosAuthorizationTokenResolver the token resolver
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder authorizationTokenResolver(
        CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) {
        this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver,
            "'cosmosAuthorizationTokenResolver' cannot be null.");
        this.keyOrResourceToken = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets the Azure Cosmos DB endpoint the SDK will connect to.
     *
     * @return the endpoint
     */
    String getEndpoint() {
        return serviceEndpoint;
    }

    /**
     * Sets the Azure Cosmos DB endpoint the SDK will connect to.
     *
     * @param endpoint the service endpoint
     * @return current Builder
     */
    public CosmosClientBuilder endpoint(String endpoint) {
        this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null.");
        return this;
    }

    /**
     * Gets either a master or readonly key used to perform authentication for accessing resource.
     *
     * @return the key
     */
    String getKey() {
        return keyOrResourceToken;
    }

    /**
     * Sets either a master or readonly key used to perform authentication for accessing resource.
     * Clears all other credential forms (mutually exclusive).
     *
     * @param key master or readonly key
     * @return current Builder.
     */
    public CosmosClientBuilder key(String key) {
        this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null.");
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets a resource token used to perform authentication for accessing resource.
     *
     * @return the resourceToken
     */
    String getResourceToken() {
        return keyOrResourceToken;
    }

    /**
     * Sets a resource token used to perform authentication for accessing resource.
     * Clears all other credential forms (mutually exclusive).
     *
     * @param resourceToken resourceToken for authentication
     * @return current Builder.
     */
    public CosmosClientBuilder resourceToken(String resourceToken) {
        this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null.");
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets a token credential instance used to perform authentication for accessing resource.
     *
     * @return the token credential.
     */
    TokenCredential getTokenCredential() {
        return tokenCredential;
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service.
     * Clears all other credential forms (mutually exclusive).
     *
     * @param credential {@link TokenCredential}.
     * @return the updated CosmosClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public CosmosClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.permissions = null;
        return this;
    }

    /**
     * Gets the permission list, which contains the resource tokens needed to access resources.
     *
     * @return the permission list
     */
    List<CosmosPermissionProperties> getPermissions() {
        return permissions;
    }

    /**
     * Sets the permission list, which contains the resource tokens needed to access resources.
     * Clears all other credential forms (mutually exclusive).
     *
     * @param permissions Permission list for authentication.
     * @return current Builder.
     */
    public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) {
        this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null.");
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.credential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets the {@link ConsistencyLevel} to be used.
     *
     * @return the consistency level
     */
    ConsistencyLevel getConsistencyLevel() {
        return this.desiredConsistencyLevel;
    }

    /**
     * Sets the {@link ConsistencyLevel} to be used.
     *
     * @param desiredConsistencyLevel {@link ConsistencyLevel}
     * @return current Builder
     */
    public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) {
        this.desiredConsistencyLevel = desiredConsistencyLevel;
        return this;
    }

    /**
     * Gets the {@link ConnectionPolicy} to be used.
     *
     * @return the connection policy
     */
    ConnectionPolicy getConnectionPolicy() {
        return connectionPolicy;
    }

    /**
     * Gets the {@link AzureKeyCredential} to be used.
     *
     * @return {@link AzureKeyCredential}
     */
    AzureKeyCredential getCredential() {
        return credential;
    }

    /**
     * Sets the {@link AzureKeyCredential} to be used.
     * Clears all other credential forms (mutually exclusive).
     *
     * @param credential {@link AzureKeyCredential}
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder credential(AzureKeyCredential credential) {
        this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null.");
        this.keyOrResourceToken = null;
        this.cosmosAuthorizationTokenResolver = null;
        this.permissions = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Gets the boolean which indicates whether to only return the headers and status code in
     * Cosmos DB response for Create, Update and Delete operations on CosmosItem.
     * <p>
     * If set to false (the default), the service doesn't return the payload in the response,
     * reducing networking and CPU load.
     *
     * @return a boolean indicating whether payload will be included in the response or not
     */
    boolean isContentResponseOnWriteEnabled() {
        return contentResponseOnWriteEnabled;
    }

    /**
     * Sets the boolean to only return the headers and status code in Cosmos DB response for
     * Create, Update and Delete operations on CosmosItem.
     * <p>
     * If set to false (the default), the service doesn't return the payload in the response,
     * reducing networking and CPU load. This feature does not impact RU usage for read or
     * write operations.
     *
     * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included
     *                                      in the response or not
     * @return current cosmosClientBuilder
     */
    public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) {
        this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled;
        return this;
    }

    /**
     * Sets the default GATEWAY connection configuration to be used.
     *
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder gatewayMode() {
        this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig();
        return this;
    }

    /**
     * Sets the GATEWAY connection configuration to be used.
     *
     * @param gatewayConnectionConfig gateway connection configuration
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) {
        this.gatewayConnectionConfig = gatewayConnectionConfig;
        return this;
    }

    /**
     * Sets the default DIRECT connection configuration to be used.
     * By default, the builder is initialized with directMode().
     *
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder directMode() {
        this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig();
        return this;
    }

    /**
     * Sets the DIRECT connection configuration to be used.
     * By default, the builder is initialized with directMode().
     *
     * @param directConnectionConfig direct connection configuration
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) {
        this.directConnectionConfig = directConnectionConfig;
        return this;
    }

    /**
     * Sets the DIRECT connection configuration to be used, plus a gateway configuration for the
     * metadata operations that go through the gateway client even in direct mode. Setting the
     * gateway connection config here doesn't affect the connection mode, which stays Direct.
     *
     * @param directConnectionConfig direct connection configuration to be used
     * @param gatewayConnectionConfig gateway connection configuration to be used
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig,
                                          GatewayConnectionConfig gatewayConnectionConfig) {
        this.directConnectionConfig = directConnectionConfig;
        this.gatewayConnectionConfig = gatewayConnectionConfig;
        return this;
    }

    /**
     * Sets the value of the user-agent suffix.
     *
     * @param userAgentSuffix The value to be appended to the user-agent header, this is
     *                        used for monitoring purposes.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) {
        this.userAgentSuffix = userAgentSuffix;
        return this;
    }

    /**
     * Sets the retry policy options associated with the DocumentClient instance.
     * Optional; when not set, the SDK uses the default retry policy values.
     *
     * @param throttlingRetryOptions the RetryOptions instance.
     * @return current CosmosClientBuilder
     * @throws IllegalArgumentException thrown if an error occurs
     */
    public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) {
        this.throttlingRetryOptions = throttlingRetryOptions;
        return this;
    }

    /**
     * Sets the preferred regions for geo-replicated database accounts, e.g. "East US".
     * <p>
     * When endpoint discovery is enabled and this list is non-empty, the SDK prefers the regions
     * in the listed order to perform operations. Ignored if endpoint discovery is disabled.
     *
     * @param preferredRegions the list of preferred regions.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder preferredRegions(List<String> preferredRegions) {
        this.preferredRegions = preferredRegions;
        return this;
    }

    /**
     * Sets the flag to enable endpoint discovery for geo-replicated database accounts.
     * The default is true (enabled).
     *
     * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) {
        this.endpointDiscoveryEnabled = endpointDiscoveryEnabled;
        return this;
    }

    /**
     * Sets the flag to enable writes on any region for geo-replicated database accounts.
     * The default is true. Has no effect until EnableMultipleWriteRegions in DatabaseAccount
     * is also set to true.
     *
     * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated
     *                                    database accounts.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) {
        this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled;
        return this;
    }

    /**
     * Sets whether reads may go to multiple regions configured on the account. The default is
     * true for all consistency levels other than Bounded Staleness (false for Bounded Staleness).
     *
     * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured
     *                                    on an account of Azure Cosmos DB service.
     * @return current CosmosClientBuilder
     */
    public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) {
        this.readRequestsFallbackEnabled = readRequestsFallbackEnabled;
        return this;
    }

    /**
     * Gets the GATEWAY connection configuration to be used.
     *
     * @return gateway connection config
     */
    GatewayConnectionConfig getGatewayConnectionConfig() {
        return gatewayConnectionConfig;
    }

    /**
     * Gets the DIRECT connection configuration to be used.
     *
     * @return direct connection config
     */
    DirectConnectionConfig getDirectConnectionConfig() {
        return directConnectionConfig;
    }

    /**
     * Gets the value of user-agent suffix.
     *
     * @return the value of user-agent suffix.
     */
    String getUserAgentSuffix() {
        return userAgentSuffix;
    }

    /**
     * Gets the retry policy options associated with the DocumentClient instance.
     *
     * @return the RetryOptions instance.
     */
    ThrottlingRetryOptions getThrottlingRetryOptions() {
        return throttlingRetryOptions;
    }

    /**
     * Gets the preferred regions for geo-replicated database accounts.
     *
     * @return the list of preferred regions (never null; empty when unset).
     */
    List<String> getPreferredRegions() {
        return preferredRegions != null ? preferredRegions : Collections.emptyList();
    }

    /**
     * Gets the flag to enable endpoint discovery for geo-replicated database accounts.
     *
     * @return whether endpoint discovery is enabled.
     */
    boolean isEndpointDiscoveryEnabled() {
        return endpointDiscoveryEnabled;
    }

    /**
     * Gets the flag to enable writes on any region for geo-replicated database accounts.
     * The default is true.
     *
     * @return flag to enable writes on any regions for geo-replicated database accounts.
     */
    boolean isMultipleWriteRegionsEnabled() {
        return multipleWriteRegionsEnabled;
    }

    /**
     * Gets whether reads may go to multiple regions configured on the account.
     * The default is true (false for Bounded Staleness).
     *
     * @return flag to allow reads to go to multiple regions configured on the account.
     */
    boolean isReadRequestsFallbackEnabled() {
        return readRequestsFallbackEnabled;
    }

    /**
     * Builds a cosmos async client with the provided properties.
     *
     * @return CosmosAsyncClient
     */
    public CosmosAsyncClient buildAsyncClient() {
        validateConfig();
        buildConnectionPolicy();
        return new CosmosAsyncClient(this);
    }

    /**
     * Builds a cosmos sync client with the provided properties.
     *
     * @return CosmosClient
     */
    public CosmosClient buildClient() {
        validateConfig();
        buildConnectionPolicy();
        return new CosmosClient(this);
    }

    // Materializes the ConnectionPolicy from the configured direct/gateway configs.
    // Direct config wins; a gateway config alongside it only contributes the
    // gateway-routed settings (pool size, timeouts, proxy).
    private void buildConnectionPolicy() {
        if (this.directConnectionConfig != null) {
            this.connectionPolicy = new ConnectionPolicy(directConnectionConfig);
            if (this.gatewayConnectionConfig != null) {
                this.connectionPolicy.setMaxConnectionPoolSize(this.gatewayConnectionConfig.getMaxConnectionPoolSize());
                this.connectionPolicy.setRequestTimeout(this.gatewayConnectionConfig.getRequestTimeout());
                this.connectionPolicy.setIdleHttpConnectionTimeout(this.gatewayConnectionConfig.getIdleConnectionTimeout());
                this.connectionPolicy.setProxy(this.gatewayConnectionConfig.getProxy());
            }
        } else if (gatewayConnectionConfig != null) {
            this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig);
        }
        this.connectionPolicy.setPreferredRegions(this.preferredRegions);
        this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix);
        this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions);
        this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled);
        this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled);
        this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled);
    }

    // Validates the builder state before constructing a client.
    private void validateConfig() {
        // FIX: check for a missing endpoint before parsing it. Previously the null check ran
        // after 'new URI(serviceEndpoint)', so a missing endpoint surfaced as an unhelpful
        // NullPointerException from URI instead of this descriptive IllegalArgumentException.
        ifThrowIllegalArgException(this.serviceEndpoint == null,
            "cannot buildAsyncClient client without service endpoint");
        URI uri;
        try {
            uri = new URI(serviceEndpoint);
        } catch (URISyntaxException e) {
            throw new IllegalArgumentException("invalid serviceEndpoint", e);
        }
        if (preferredRegions != null) {
            // Each preferred region must be non-blank and must resolve to a valid location endpoint.
            preferredRegions.forEach(
                preferredRegion -> {
                    Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null,
                        "preferredRegion can't be empty");
                    String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", "");
                    LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion);
                }
            );
        }
        ifThrowIllegalArgException(
            this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty())
                && this.credential == null && this.tokenCredential == null,
            "cannot buildAsyncClient client without any one of key, resource token, permissions, and "
                + "azure key credential");
        ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()),
            "cannot buildAsyncClient client without key credential");
    }

    Configs configs() {
        return configs;
    }

    /**
     * Configs
     *
     * @return current cosmosClientBuilder
     */
    CosmosClientBuilder configs(Configs configs) {
        this.configs = configs;
        return this;
    }

    // Throws IllegalArgumentException with the given message when the condition holds.
    private void ifThrowIllegalArgException(boolean value, String error) {
        if (value) {
            throw new IllegalArgumentException(error);
        }
    }
}
class CosmosClientBuilder { private Configs configs = new Configs(); private String serviceEndpoint; private String keyOrResourceToken; private CosmosClientMetadataCachesSnapshot state; private TokenCredential tokenCredential; private ConnectionPolicy connectionPolicy; private GatewayConnectionConfig gatewayConnectionConfig; private DirectConnectionConfig directConnectionConfig; private ConsistencyLevel desiredConsistencyLevel; private List<CosmosPermissionProperties> permissions; private CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver; private AzureKeyCredential credential; private boolean sessionCapturingOverrideEnabled; private boolean connectionSharingAcrossClientsEnabled; private boolean contentResponseOnWriteEnabled; private String userAgentSuffix; private ThrottlingRetryOptions throttlingRetryOptions; private List<String> preferredRegions; private boolean endpointDiscoveryEnabled = true; private boolean multipleWriteRegionsEnabled = true; private boolean readRequestsFallbackEnabled = true; /** * Instantiates a new Cosmos client builder. */ public CosmosClientBuilder() { this.connectionPolicy = new ConnectionPolicy(DirectConnectionConfig.getDefaultConfig()); this.userAgentSuffix = ""; this.throttlingRetryOptions = new ThrottlingRetryOptions(); } CosmosClientMetadataCachesSnapshot metadataCaches() { return this.state; } /** * Session capturing is enabled by default for {@link ConsistencyLevel * For other consistency levels, it is not needed, unless if you need occasionally send requests with Session * Consistency while the client is not configured in session. * <p> * enabling Session capturing for Session mode has no effect. 
* @param sessionCapturingOverrideEnabled session capturing override * @return current cosmosClientBuilder */ public CosmosClientBuilder sessionCapturingOverrideEnabled(boolean sessionCapturingOverrideEnabled) { this.sessionCapturingOverrideEnabled = sessionCapturingOverrideEnabled; return this; } /** * Indicates if Session capturing is enabled for non Session modes. * The default is false. * * @return the session capturing override */ boolean isSessionCapturingOverrideEnabled() { return this.sessionCapturingOverrideEnabled; } /** * Enables connections sharing across multiple Cosmos Clients. The default is false. * * * <pre> * {@code * CosmosAsyncClient client1 = new CosmosClientBuilder() * .endpoint(serviceEndpoint1) * .key(key1) * .consistencyLevel(ConsistencyLevel.SESSION) * .connectionSharingAcrossClientsEnabled(true) * .buildAsyncClient(); * * CosmosAsyncClient client2 = new CosmosClientBuilder() * .endpoint(serviceEndpoint2) * .key(key2) * .consistencyLevel(ConsistencyLevel.SESSION) * .connectionSharingAcrossClientsEnabled(true) * .buildAsyncClient(); * * * } * </pre> * * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts, * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client. * * Please note, when setting this option, the connection configuration (e.g., socket timeout config, idle timeout * config) of the first instantiated client will be used for all other client instances. * * @param connectionSharingAcrossClientsEnabled connection sharing * @return current cosmosClientBuilder */ public CosmosClientBuilder connectionSharingAcrossClientsEnabled(boolean connectionSharingAcrossClientsEnabled) { this.connectionSharingAcrossClientsEnabled = connectionSharingAcrossClientsEnabled; return this; } /** * Indicates whether connection sharing is enabled. The default is false. 
* * When you have multiple instances of Cosmos Client in the same JVM interacting to multiple Cosmos accounts, * enabling this allows connection sharing in Direct mode if possible between instances of Cosmos Client. * * @return the connection sharing across multiple clients */ boolean isConnectionSharingAcrossClientsEnabled() { return this.connectionSharingAcrossClientsEnabled; } /** * Gets the token resolver * * @return the token resolver */ CosmosAuthorizationTokenResolver getAuthorizationTokenResolver() { return cosmosAuthorizationTokenResolver; } /** * Sets the token resolver * * @param cosmosAuthorizationTokenResolver the token resolver * @return current cosmosClientBuilder */ CosmosClientBuilder authorizationTokenResolver( CosmosAuthorizationTokenResolver cosmosAuthorizationTokenResolver) { this.cosmosAuthorizationTokenResolver = Objects.requireNonNull(cosmosAuthorizationTokenResolver, "'cosmosAuthorizationTokenResolver' cannot be null."); this.keyOrResourceToken = null; this.credential = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets the Azure Cosmos DB endpoint the SDK will connect to * * @return the endpoint */ String getEndpoint() { return serviceEndpoint; } /** * Sets the Azure Cosmos DB endpoint the SDK will connect to * * @param endpoint the service endpoint * @return current Builder */ public CosmosClientBuilder endpoint(String endpoint) { this.serviceEndpoint = Objects.requireNonNull(endpoint, "'endpoint' cannot be null."); return this; } /** * Gets either a master or readonly key used to perform authentication * for accessing resource. * * @return the key */ String getKey() { return keyOrResourceToken; } /** * Sets either a master or readonly key used to perform authentication * for accessing resource. * * @param key master or readonly key * @return current Builder. 
*/ public CosmosClientBuilder key(String key) { this.keyOrResourceToken = Objects.requireNonNull(key, "'key' cannot be null."); this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets a resource token used to perform authentication * for accessing resource. * * @return the resourceToken */ String getResourceToken() { return keyOrResourceToken; } /** * Sets a resource token used to perform authentication * for accessing resource. * * @param resourceToken resourceToken for authentication * @return current Builder. */ public CosmosClientBuilder resourceToken(String resourceToken) { this.keyOrResourceToken = Objects.requireNonNull(resourceToken, "'resourceToken' cannot be null."); this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets a token credential instance used to perform authentication * for accessing resource. * * @return the token credential. */ TokenCredential getTokenCredential() { return tokenCredential; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential {@link TokenCredential}. * @return the updated CosmosClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public CosmosClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.keyOrResourceToken = null; this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.permissions = null; return this; } /** * Gets the permission list, which contains the * resource tokens needed to access resources. * * @return the permission list */ List<CosmosPermissionProperties> getPermissions() { return permissions; } /** * Sets the permission list, which contains the * resource tokens needed to access resources. 
* * @param permissions Permission list for authentication. * @return current Builder. */ public CosmosClientBuilder permissions(List<CosmosPermissionProperties> permissions) { this.permissions = Objects.requireNonNull(permissions, "'permissions' cannot be null."); this.keyOrResourceToken = null; this.cosmosAuthorizationTokenResolver = null; this.credential = null; this.tokenCredential = null; return this; } /** * Gets the {@link ConsistencyLevel} to be used * * By default, {@link ConsistencyLevel * * @return the consistency level */ ConsistencyLevel getConsistencyLevel() { return this.desiredConsistencyLevel; } /** * Sets the {@link ConsistencyLevel} to be used * * By default, {@link ConsistencyLevel * * @param desiredConsistencyLevel {@link ConsistencyLevel} * @return current Builder */ public CosmosClientBuilder consistencyLevel(ConsistencyLevel desiredConsistencyLevel) { this.desiredConsistencyLevel = desiredConsistencyLevel; return this; } /** * Gets the (@link ConnectionPolicy) to be used * * @return the connection policy */ ConnectionPolicy getConnectionPolicy() { return connectionPolicy; } /** * Gets the {@link AzureKeyCredential} to be used * * @return {@link AzureKeyCredential} */ AzureKeyCredential getCredential() { return credential; } /** * Sets the {@link AzureKeyCredential} to be used * * @param credential {@link AzureKeyCredential} * @return current cosmosClientBuilder */ public CosmosClientBuilder credential(AzureKeyCredential credential) { this.credential = Objects.requireNonNull(credential, "'cosmosKeyCredential' cannot be null."); this.keyOrResourceToken = null; this.cosmosAuthorizationTokenResolver = null; this.permissions = null; this.tokenCredential = null; return this; } /** * Gets the boolean which indicates whether to only return the headers and status code in Cosmos DB response * in case of Create, Update and Delete operations on CosmosItem. * * If set to false (which is by default), service doesn't return payload in the response. 
It reduces networking * and CPU load by not sending the payload back over the network and serializing it * on the client. * * By-default, this is false. * * @return a boolean indicating whether payload will be included in the response or not */ boolean isContentResponseOnWriteEnabled() { return contentResponseOnWriteEnabled; } /** * Sets the boolean to only return the headers and status code in Cosmos DB response * in case of Create, Update and Delete operations on CosmosItem. * * If set to false (which is by default), service doesn't return payload in the response. It reduces networking * and CPU load by not sending the payload back over the network and serializing it on the client. * * This feature does not impact RU usage for read or write operations. * * By-default, this is false. * * @param contentResponseOnWriteEnabled a boolean indicating whether payload will be included in the response or not * @return current cosmosClientBuilder */ public CosmosClientBuilder contentResponseOnWriteEnabled(boolean contentResponseOnWriteEnabled) { this.contentResponseOnWriteEnabled = contentResponseOnWriteEnabled; return this; } /** * Sets the default GATEWAY connection configuration to be used. * * @return current CosmosClientBuilder */ public CosmosClientBuilder gatewayMode() { this.gatewayConnectionConfig = GatewayConnectionConfig.getDefaultConfig(); return this; } /** * Sets the GATEWAY connection configuration to be used. * * @param gatewayConnectionConfig gateway connection configuration * @return current CosmosClientBuilder */ public CosmosClientBuilder gatewayMode(GatewayConnectionConfig gatewayConnectionConfig) { this.gatewayConnectionConfig = gatewayConnectionConfig; return this; } /** * Sets the default DIRECT connection configuration to be used. 
* * By default, the builder is initialized with directMode() * * @return current CosmosClientBuilder */ public CosmosClientBuilder directMode() { this.directConnectionConfig = DirectConnectionConfig.getDefaultConfig(); return this; } /** * Sets the DIRECT connection configuration to be used. * * By default, the builder is initialized with directMode() * * @param directConnectionConfig direct connection configuration * @return current CosmosClientBuilder */ public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig) { this.directConnectionConfig = directConnectionConfig; return this; } /** * Sets the DIRECT connection configuration to be used. * gatewayConnectionConfig - represents basic configuration to be used for gateway client. * * Even in direct connection mode, some of the meta data operations go through gateway client, * * Setting gateway connection config in this API doesn't affect the connection mode, * which will be Direct in this case. * * @param directConnectionConfig direct connection configuration to be used * @param gatewayConnectionConfig gateway connection configuration to be used * @return current CosmosClientBuilder */ public CosmosClientBuilder directMode(DirectConnectionConfig directConnectionConfig, GatewayConnectionConfig gatewayConnectionConfig) { this.directConnectionConfig = directConnectionConfig; this.gatewayConnectionConfig = gatewayConnectionConfig; return this; } /** * sets the value of the user-agent suffix. * * @param userAgentSuffix The value to be appended to the user-agent header, this is * used for monitoring purposes. * * @return current CosmosClientBuilder */ public CosmosClientBuilder userAgentSuffix(String userAgentSuffix) { this.userAgentSuffix = userAgentSuffix; return this; } /** * Sets the retry policy options associated with the DocumentClient instance. * <p> * Properties in the RetryOptions class allow application to customize the built-in * retry policies. This property is optional. 
When it's not set, the SDK uses the * default values for configuring the retry policies. See RetryOptions class for * more details. * * @param throttlingRetryOptions the RetryOptions instance. * @return current CosmosClientBuilder * @throws IllegalArgumentException thrown if an error occurs */ public CosmosClientBuilder throttlingRetryOptions(ThrottlingRetryOptions throttlingRetryOptions) { this.throttlingRetryOptions = throttlingRetryOptions; return this; } /** * Sets the preferred regions for geo-replicated database accounts. For example, * "East US" as the preferred region. * <p> * When EnableEndpointDiscovery is true and PreferredRegions is non-empty, * the SDK will prefer to use the regions in the container in the order * they are specified to perform operations. * <p> * If EnableEndpointDiscovery is set to false, this property is ignored. * * @param preferredRegions the list of preferred regions. * @return current CosmosClientBuilder */ public CosmosClientBuilder preferredRegions(List<String> preferredRegions) { this.preferredRegions = preferredRegions; return this; } /** * Sets the flag to enable endpoint discovery for geo-replicated database accounts. * <p> * When EnableEndpointDiscovery is true, the SDK will automatically discover the * current write and read regions to ensure requests are sent to the correct region * based on the capability of the region and the user's preference. * <p> * The default value for this property is true indicating endpoint discovery is enabled. * * @param endpointDiscoveryEnabled true if EndpointDiscovery is enabled. * @return current CosmosClientBuilder */ public CosmosClientBuilder endpointDiscoveryEnabled(boolean endpointDiscoveryEnabled) { this.endpointDiscoveryEnabled = endpointDiscoveryEnabled; return this; } /** * Sets the flag to enable writes on any regions for geo-replicated database accounts in the Azure * Cosmos DB service. 
* <p> * When the value of this property is true, the SDK will direct write operations to * available writable regions of geo-replicated database account. Writable regions * are ordered by PreferredRegions property. Setting the property value * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount * is also set to true. * <p> * DEFAULT value is true indicating that writes are directed to * available writable regions of geo-replicated database account. * * @param multipleWriteRegionsEnabled flag to enable writes on any regions for geo-replicated * database accounts. * @return current CosmosClientBuilder */ public CosmosClientBuilder multipleWriteRegionsEnabled(boolean multipleWriteRegionsEnabled) { this.multipleWriteRegionsEnabled = multipleWriteRegionsEnabled; return this; } /** * Sets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. * <p> * DEFAULT value is true. * <p> * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness, * The default is false for Bounded Staleness. * 1. {@link * 2. the Azure Cosmos DB account has more than one region * * @param readRequestsFallbackEnabled flag to enable reads to go to multiple regions configured on an account of * Azure Cosmos DB service. * @return current CosmosClientBuilder */ public CosmosClientBuilder readRequestsFallbackEnabled(boolean readRequestsFallbackEnabled) { this.readRequestsFallbackEnabled = readRequestsFallbackEnabled; return this; } /** * Gets the GATEWAY connection configuration to be used. * * @return gateway connection config */ GatewayConnectionConfig getGatewayConnectionConfig() { return gatewayConnectionConfig; } /** * Gets the DIRECT connection configuration to be used. * * @return direct connection config */ DirectConnectionConfig getDirectConnectionConfig() { return directConnectionConfig; } /** * Gets the value of user-agent suffix. * * @return the value of user-agent suffix. 
*/ String getUserAgentSuffix() { return userAgentSuffix; } /** * Gets the retry policy options associated with the DocumentClient instance. * * @return the RetryOptions instance. */ ThrottlingRetryOptions getThrottlingRetryOptions() { return throttlingRetryOptions; } /** * Gets the preferred regions for geo-replicated database accounts * * @return the list of preferred region. */ List<String> getPreferredRegions() { return preferredRegions != null ? preferredRegions : Collections.emptyList(); } /** * Gets the flag to enable endpoint discovery for geo-replicated database accounts. * * @return whether endpoint discovery is enabled. */ boolean isEndpointDiscoveryEnabled() { return endpointDiscoveryEnabled; } /** * Gets the flag to enable writes on any regions for geo-replicated database accounts in the Azure * Cosmos DB service. * <p> * When the value of this property is true, the SDK will direct write operations to * available writable regions of geo-replicated database account. Writable regions * are ordered by PreferredRegions property. Setting the property value * to true has no effect until EnableMultipleWriteRegions in DatabaseAccount * is also set to true. * <p> * DEFAULT value is true indicating that writes are directed to * available writable regions of geo-replicated database account. * * @return flag to enable writes on any regions for geo-replicated database accounts. */ boolean isMultipleWriteRegionsEnabled() { return multipleWriteRegionsEnabled; } /** * Gets whether to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. * <p> * DEFAULT value is true. * <p> * If this property is not set, the default is true for all Consistency Levels other than Bounded Staleness, * The default is false for Bounded Staleness. * 1. {@link * 2. the Azure Cosmos DB account has more than one region * * @return flag to allow for reads to go to multiple regions configured on an account of Azure Cosmos DB service. 
*/ boolean isReadRequestsFallbackEnabled() { return readRequestsFallbackEnabled; } /** * Builds a cosmos async client with the provided properties * * @return CosmosAsyncClient */ public CosmosAsyncClient buildAsyncClient() { validateConfig(); buildConnectionPolicy(); return new CosmosAsyncClient(this); } /** * Builds a cosmos sync client with the provided properties * * @return CosmosClient */ public CosmosClient buildClient() { validateConfig(); buildConnectionPolicy(); return new CosmosClient(this); } private void buildConnectionPolicy() { if (this.directConnectionConfig != null) { this.connectionPolicy = new ConnectionPolicy(directConnectionConfig); if (this.gatewayConnectionConfig != null) { this.connectionPolicy.setMaxConnectionPoolSize(this.gatewayConnectionConfig.getMaxConnectionPoolSize()); this.connectionPolicy.setRequestTimeout(this.gatewayConnectionConfig.getRequestTimeout()); this.connectionPolicy.setIdleHttpConnectionTimeout(this.gatewayConnectionConfig.getIdleConnectionTimeout()); this.connectionPolicy.setProxy(this.gatewayConnectionConfig.getProxy()); } } else if (gatewayConnectionConfig != null) { this.connectionPolicy = new ConnectionPolicy(gatewayConnectionConfig); } this.connectionPolicy.setPreferredRegions(this.preferredRegions); this.connectionPolicy.setUserAgentSuffix(this.userAgentSuffix); this.connectionPolicy.setThrottlingRetryOptions(this.throttlingRetryOptions); this.connectionPolicy.setEndpointDiscoveryEnabled(this.endpointDiscoveryEnabled); this.connectionPolicy.setMultipleWriteRegionsEnabled(this.multipleWriteRegionsEnabled); this.connectionPolicy.setReadRequestsFallbackEnabled(this.readRequestsFallbackEnabled); } private void validateConfig() { URI uri; try { uri = new URI(serviceEndpoint); } catch (URISyntaxException e) { throw new IllegalArgumentException("invalid serviceEndpoint", e); } if (preferredRegions != null) { preferredRegions.stream().forEach( preferredRegion -> { 
Preconditions.checkArgument(StringUtils.trimToNull(preferredRegion) != null, "preferredRegion can't be empty"); String trimmedPreferredRegion = preferredRegion.toLowerCase(Locale.ROOT).replace(" ", ""); LocationHelper.getLocationEndpoint(uri, trimmedPreferredRegion); } ); } ifThrowIllegalArgException(this.serviceEndpoint == null, "cannot buildAsyncClient client without service endpoint"); ifThrowIllegalArgException( this.keyOrResourceToken == null && (permissions == null || permissions.isEmpty()) && this.credential == null && this.tokenCredential == null, "cannot buildAsyncClient client without any one of key, resource token, permissions, and " + "azure key credential"); ifThrowIllegalArgException(credential != null && StringUtils.isEmpty(credential.getKey()), "cannot buildAsyncClient client without key credential"); } Configs configs() { return configs; } /** * Configs * * @return current cosmosClientBuilder */ CosmosClientBuilder configs(Configs configs) { this.configs = configs; return this; } private void ifThrowIllegalArgException(boolean value, String error) { if (value) { throw new IllegalArgumentException(error); } } static { CosmosClientBuilderHelper.setCosmosClientBuilderAccessor( new CosmosClientBuilderHelper.CosmosClientBuilderAccessor() { @Override public void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder, CosmosClientMetadataCachesSnapshot metadataCache) { builder.metadataCaches(metadataCache); } @Override public CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder) { return builder.metadataCaches(); } }); } }
This is not correct?
public void setup() { AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); addInlinedPropertiesToEnvironment( context, AAD_PROPERTY_PREFIX + "user-group.allowed-groups = group1", AAD_PROPERTY_PREFIX + "tenant-id = fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id = fake-client-id", AAD_PROPERTY_PREFIX + "client-secret = fake-client-secret", AAD_PROPERTY_PREFIX + "webApiClients.fake-graph.scopes = https: ); context.register(AADResourceServerOboConfiguration.class); context.refresh(); clientRegistrationsRepo = context.getBean(AADOboClientRegistrationRepository.class); }
AAD_PROPERTY_PREFIX + "webApiClients.fake-graph.scopes = https:
public void setup() { AnnotationConfigApplicationContext context = new AnnotationConfigApplicationContext(); addInlinedPropertiesToEnvironment( context, AAD_PROPERTY_PREFIX + "tenant-id = fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id = fake-client-id", AAD_PROPERTY_PREFIX + "client-secret = fake-client-secret", AAD_PROPERTY_PREFIX + "authorization.fake-graph.scopes = https: ); context.register(AADResourceServerOboConfiguration.class); context.refresh(); clientRegistrationsRepo = context.getBean(InMemoryClientRegistrationRepository.class); }
class AADOAuth2OboAuthorizedClientRepositoryTest { private static final String OBO_ACCESS_TOKEN_1 = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCIsImtpZCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCJ9.eyJhdWQiOiJhcGk6Ly9zYW1wbGUtY2xpZW50LWlkIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvMWQxYTA2YTktYjIwYS00NTEzLThhNjQtZGFiMDhkMzJjOGI2LyIsImlhdCI6MTYwNzA3NTc1MiwibmJmIjoxNjA3MDc1NzUyLCJleHAiOjE2MDcwNzk2NTIsImFjciI6IjEiLCJhaW8iOiJBVFFBeS84UkFBQUFkSllKZkluaHhoWHBQTStVUVR0TmsrcnJnWG1FQmRpL0JhQWJUOGtQT2t1amJhQ2pBSTNBeUZWcnE0NGZHdHNOIiwiYW1yIjpbInB3ZCJdLCJhcHBpZCI6ImZmMzhjYjg2LTljMzgtNGUyMS1iZTY4LWM1ODFhNTVmYjVjMCIsImFwcGlkYWNyIjoiMSIsImZhbWlseV9uYW1lIjoiY2hlbiIsImdpdmVuX25hbWUiOiJhbXkiLCJpcGFkZHIiOiIxNjcuMjIwLjI1NS42OCIsIm5hbWUiOiJhbXkgY2hlbiIsIm9pZCI6ImFiZDI4ZGUxLTljMzctNDg5ZC04ZWVjLWZlZWVmNGQyNzRhMyIsInJoIjoiMC5BQUFBcVFZYUhRcXlFMFdLWk5xd2pUTEl0b2JMT1A4NG5DRk92bWpGZ2FWZnRjQjRBQUkuIiwic2NwIjoiUmVzb3VyY2VBY2Nlc3NDdXN0b21SZXNvdXJjZXMucmVhZCBSZXNvdXJjZUFjY2Vzc0dyYXBoLnJlYWQgUmVzb3VyY2VBY2Nlc3NPdGhlclJlc291cmNlcy5yZWFkIiwic3ViIjoiS0xyMXZFQTN3Wk1MdWFFZU1IUl80ZmdTdVVVVnNJWDhHREVlOWU5M1BPYyIsInRpZCI6IjFkMWEwNmE5LWIyMGEtNDUxMy04YTY0LWRhYjA4ZDMyYzhiNiIsInVuaXF1ZV9uYW1lIjoiYW15QG1vYXJ5Lm9ubWljcm9zb2Z0LmNvbSIsInVwbiI6ImFteUBtb2FyeS5vbm1pY3Jvc29mdC5jb20iLCJ1dGkiOiJFTG1xXzZVUkJFS19kN3I4ZlFJR0FBIiwidmVyIjoiMS4wIn0.fM_huHrr5M243oM3rMagGGckoxkLanFkurMJz4EBthrdQlFJzl6eo13pmU0Taq2ognAzsxUka0yihImrvhqzub9IGxRtCdQ3NAvD1fAiVdSUt_aBetIFCi5Pdc6I7KJDiGMQh8RTmduM7IOdxV_3-rug6dZXhW5TTmeq5PfLGYlrKOkC2za7M5G7gn7li1D5osh98HorFBWZoCDhe1iJPd_p_m0EffwTbKFwyvOGN-PKxyzOnoCOma_VYvRABUtBa8rNBFTaH5R9EAvsOmIZ_mI98Irl_8QNr9No-R0nXOrqKCFx5sMYkUuT7mvSaVPAlNr2X8eJjY3Wi-6ishufWQ"; private static final String OBO_ACCESS_TOKEN_2 = 
"eyJ0eXAiOiJKV1QiLCJub25jZSI6IkV2OUJILXNUcGdGYUwxTG5NSEVERGFUWDhVYmpuWmdVSEM4SF9BTmpUaXMiLCJhbGciOiJSUzI1NiIsIng1dCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCIsImtpZCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCJ9.eyJhdWQiOiJodHRwczovL2dyYXBoLm1pY3Jvc29mdC5jb20iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC8zMDhkZjA4YS0xMzMyLTRhMTUtYmIwNi0yYWQ3ZThiNzFiY2YvIiwiaWF0IjoxNjA3NTg4NTMwLCJuYmYiOjE2MDc1ODg1MzAsImV4cCI6MTYwNzU5MjQzMCwiYWNjdCI6MCwiYWNyIjoiMSIsImFjcnMiOlsidXJuOnVzZXI6cmVnaXN0ZXJzZWN1cml0eWluZm8iLCJ1cm46bWljcm9zb2Z0OnJlcTEiLCJ1cm46bWljcm9zb2Z0OnJlcTIiLCJ1cm46bWljcm9zb2Z0OnJlcTMiLCJjMSIsImMyIiwiYzMiLCJjNCIsImM1IiwiYzYiLCJjNyIsImM4IiwiYzkiLCJjMTAiLCJjMTEiLCJjMTIiLCJjMTMiLCJjMTQiLCJjMTUiLCJjMTYiLCJjMTciLCJjMTgiLCJjMTkiLCJjMjAiLCJjMjEiLCJjMjIiLCJjMjMiLCJjMjQiLCJjMjUiXSwiYWlvIjoiQVNRQTIvOFJBQUFBcVJFS29VQ0I2aFFoVmQxN0I3ZFhVb1NSbDlDZHpkL01yQjJZcWdRTXJXTT0iLCJhbXIiOlsicHdkIl0sImFwcF9kaXNwbGF5bmFtZSI6IkphdmEtd2ViYXBpIiwiYXBwaWQiOiIyYzQ3YjgzMS1kODM4LTQ2NGYtYTY4NC1mYTc5Y2JkNjRmMjAiLCJhcHBpZGFjciI6IjAiLCJoYXN3aWRzIjoidHJ1ZSIsImlkdHlwIjoidXNlciIsImlwYWRkciI6IjE2Ny4yMjAuMjU1LjExMSIsIm5hbWUiOiJBQURfVEVTVF9HWkgiLCJvaWQiOiJhMzlkMDEwMy0yZjBhLTQ1ZjAtYTEwNy1mOWZhZGVkYmQyNjgiLCJwbGF0ZiI6IjMiLCJwdWlkIjoiMTAwMzIwMDBFNjM0ODE1NyIsInJoIjoiMC5BQUFBaXZDTk1ESVRGVXE3QmlyWDZMY2J6ekc0Unl3NDJFOUdwb1Q2ZWN2V1R5QjFBQ3cuIiwic2NwIjoiVXNlci5SZWFkIFVzZXIuUmVhZC5BbGwgcHJvZmlsZSBvcGVuaWQgZW1haWwiLCJzdWIiOiJPenlvOUZkVzIyMWh0QjBOc0ZnR1VseGg3UnQ1UUFDaExYek9UdDlTQWU0IiwidGVuYW50X3JlZ2lvbl9zY29wZSI6Ik5BIiwidGlkIjoiMzA4ZGYwOGEtMTMzMi00YTE1LWJiMDYtMmFkN2U4YjcxYmNmIiwidW5pcXVlX25hbWUiOiJhYWRfdGVzdF9nemhAY29udG9zb3JnMjIyLm9ubWljcm9zb2Z0LmNvbSIsInVwbiI6ImFhZF90ZXN0X2d6aEBjb250b3NvcmcyMjIub25taWNyb3NvZnQuY29tIiwidXRpIjoiWXVzOU1pY2oxRTZqcW1XbWVPUU5BQSIsInZlciI6IjEuMCIsInhtc19zdCI6eyJzdWIiOiJiN3FKY3kyUUpqUFNOc3lWMTBscFQ3RDRieGVlM1NVQjVmV1p4WHZmZG1vIn0sInhtc190Y2R0IjoxNjAwODQ0ODg0fQ.t9qmH_o7kEPwtr42IBU1mddPiOF_V_CX8IOYW2CJVDwwn0aVCyt9H1vWcV67k5R2Pc29hBZaFJbU6oUFWqhLvzg15mwaI4LNUYrJaXGB-oTFmKFItNjtJ3pi4OsZutvth-EmYAoaeYvqbX2irX7
br_ipMqQ5YLq9gf1F3PfV1EqdMuphZoirFYUhEioEM8DA3Qp6qSWMljXBEFDY4eAzT-h-p_7YQI0XH5R72P_4ERNgQ2j_B9ulCUWOGTO61NY3RU1IVwW-w17GLlCGjsakkf4V40_p8fgK8QArwYWlX-WlCt6fGWqjY2c4gvMoCM7bsqBJ9yREgcHzQZNc9N5Rxw"; private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private AADOboClientRegistrationRepository clientRegistrationsRepo; private OAuth2AuthorizedClient client; private IAuthenticationResult authenticationResult; private AADOAuth2OboAuthorizedClientRepository authorizedRepo; private JwtAuthenticationToken jwtAuthenticationToken; private MockHttpServletRequest mockHttpServletRequest; @BeforeEach @SuppressWarnings("unchecked") public void setupForAzureAuthorizedClient() throws ExecutionException, InterruptedException { ConfidentialClientApplication confidentialClientApplication = mock(ConfidentialClientApplication.class); CompletableFuture<IAuthenticationResult> acquireTokenFuture = mock(CompletableFuture.class); authenticationResult = mock(IAuthenticationResult.class); when(acquireTokenFuture.get()).thenReturn(authenticationResult); when(authenticationResult.accessToken()).thenReturn(OBO_ACCESS_TOKEN_1); when(confidentialClientApplication.acquireToken(any(OnBehalfOfParameters.class))) .thenReturn(acquireTokenFuture); AADOboClientRegistrationRepository clientRegistrationsRepo = mock(AADOboClientRegistrationRepository.class); when(clientRegistrationsRepo.findByRegistrationId(any())).thenReturn(ClientRegistration .withRegistrationId("fake-graph") .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate("{baseUrl}/login/oauth2/code/{registrationId}") .tokenUri("https: .jwkSetUri("https: .authorizationUri("https: + ".0/authorize") .scope("User.read") .clientId("2c47b831-d838-464f-a684-fa79cbd64f20").build()); authorizedRepo = new AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo) { @Override ConfidentialClientApplication createApp(ClientRegistration clientRegistration) { if 
("fake-graph".equals(clientRegistration.getRegistrationId())) { return confidentialClientApplication; } else { return null; } } }; final Jwt mockJwt = mock(Jwt.class); when(mockJwt.getTokenValue()).thenReturn("fake-token-value"); when(mockJwt.getSubject()).thenReturn("fake-principal-name"); jwtAuthenticationToken = new JwtAuthenticationToken(mockJwt); mockHttpServletRequest = new MockHttpServletRequest(); client = authorizedRepo.loadAuthorizedClient("fake-graph", jwtAuthenticationToken, mockHttpServletRequest ); } @Test @SuppressWarnings("unchecked") public void testLoadAzureAuthorizedClient() throws ExecutionException, InterruptedException { setupForAzureAuthorizedClient(); Assertions.assertEquals(OBO_ACCESS_TOKEN_1, client.getAccessToken().getTokenValue()); } @Test @SuppressWarnings("unchecked") public void testAuthorizedClientRequestLevelCache() throws ExecutionException, InterruptedException { setupForAzureAuthorizedClient(); Assertions.assertEquals(OBO_ACCESS_TOKEN_1, client.getAccessToken().getTokenValue()); when(authenticationResult.accessToken()).thenReturn(OBO_ACCESS_TOKEN_2); client = authorizedRepo.loadAuthorizedClient("fake-graph", jwtAuthenticationToken, mockHttpServletRequest ); Assertions.assertEquals(OBO_ACCESS_TOKEN_1, client.getAccessToken().getTokenValue()); Assertions.assertNotEquals(OBO_ACCESS_TOKEN_2, client.getAccessToken().getTokenValue()); } @Test @SuppressWarnings("unchecked") public void testLoadNotExistClientRegistration() { AADOAuth2OboAuthorizedClientRepository authorizedRepo = new AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo); final Jwt mockJwt = mock(Jwt.class); OAuth2AuthorizedClient client = authorizedRepo.loadAuthorizedClient("fake-graph-fake", new JwtAuthenticationToken(mockJwt), new MockHttpServletRequest()); Assertions.assertNull(client); } @Test @SuppressWarnings("unchecked") public void testUnsupportedTokenImplementation() { AADOAuth2OboAuthorizedClientRepository authorizedRepo = new 
AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo); PreAuthenticatedAuthenticationToken preToken = mock(PreAuthenticatedAuthenticationToken.class); try { authorizedRepo.loadAuthorizedClient("fake-graph", preToken, new MockHttpServletRequest()); fail("Expected an IllegalStateException to be thrown"); } catch (IllegalStateException e) { } } @Test @SuppressWarnings("unchecked") public void testNotExistClientApplication() { AADOAuth2OboAuthorizedClientRepository authorizedRepo = new AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo) { @Override ConfidentialClientApplication createApp(ClientRegistration clientRegistration) { return null; } }; final Jwt mockJwt = mock(Jwt.class); when(mockJwt.getTokenValue()).thenReturn("fake-token-value"); OAuth2AuthorizedClient client = authorizedRepo.loadAuthorizedClient("fake-graph", new JwtAuthenticationToken(mockJwt), new MockHttpServletRequest()); Assertions.assertNull(client); } }
class AADOAuth2OboAuthorizedClientRepositoryTest { private static final String OBO_ACCESS_TOKEN_1 = "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCIsImtpZCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCJ9.eyJhdWQiOiJhcGk6Ly9zYW1wbGUtY2xpZW50LWlkIiwiaXNzIjoiaHR0cHM6Ly9zdHMud2luZG93cy5uZXQvMWQxYTA2YTktYjIwYS00NTEzLThhNjQtZGFiMDhkMzJjOGI2LyIsImlhdCI6MTYwNzA3NTc1MiwibmJmIjoxNjA3MDc1NzUyLCJleHAiOjE2MDcwNzk2NTIsImFjciI6IjEiLCJhaW8iOiJBVFFBeS84UkFBQUFkSllKZkluaHhoWHBQTStVUVR0TmsrcnJnWG1FQmRpL0JhQWJUOGtQT2t1amJhQ2pBSTNBeUZWcnE0NGZHdHNOIiwiYW1yIjpbInB3ZCJdLCJhcHBpZCI6ImZmMzhjYjg2LTljMzgtNGUyMS1iZTY4LWM1ODFhNTVmYjVjMCIsImFwcGlkYWNyIjoiMSIsImZhbWlseV9uYW1lIjoiY2hlbiIsImdpdmVuX25hbWUiOiJhbXkiLCJpcGFkZHIiOiIxNjcuMjIwLjI1NS42OCIsIm5hbWUiOiJhbXkgY2hlbiIsIm9pZCI6ImFiZDI4ZGUxLTljMzctNDg5ZC04ZWVjLWZlZWVmNGQyNzRhMyIsInJoIjoiMC5BQUFBcVFZYUhRcXlFMFdLWk5xd2pUTEl0b2JMT1A4NG5DRk92bWpGZ2FWZnRjQjRBQUkuIiwic2NwIjoiUmVzb3VyY2VBY2Nlc3NDdXN0b21SZXNvdXJjZXMucmVhZCBSZXNvdXJjZUFjY2Vzc0dyYXBoLnJlYWQgUmVzb3VyY2VBY2Nlc3NPdGhlclJlc291cmNlcy5yZWFkIiwic3ViIjoiS0xyMXZFQTN3Wk1MdWFFZU1IUl80ZmdTdVVVVnNJWDhHREVlOWU5M1BPYyIsInRpZCI6IjFkMWEwNmE5LWIyMGEtNDUxMy04YTY0LWRhYjA4ZDMyYzhiNiIsInVuaXF1ZV9uYW1lIjoiYW15QG1vYXJ5Lm9ubWljcm9zb2Z0LmNvbSIsInVwbiI6ImFteUBtb2FyeS5vbm1pY3Jvc29mdC5jb20iLCJ1dGkiOiJFTG1xXzZVUkJFS19kN3I4ZlFJR0FBIiwidmVyIjoiMS4wIn0.fM_huHrr5M243oM3rMagGGckoxkLanFkurMJz4EBthrdQlFJzl6eo13pmU0Taq2ognAzsxUka0yihImrvhqzub9IGxRtCdQ3NAvD1fAiVdSUt_aBetIFCi5Pdc6I7KJDiGMQh8RTmduM7IOdxV_3-rug6dZXhW5TTmeq5PfLGYlrKOkC2za7M5G7gn7li1D5osh98HorFBWZoCDhe1iJPd_p_m0EffwTbKFwyvOGN-PKxyzOnoCOma_VYvRABUtBa8rNBFTaH5R9EAvsOmIZ_mI98Irl_8QNr9No-R0nXOrqKCFx5sMYkUuT7mvSaVPAlNr2X8eJjY3Wi-6ishufWQ"; private static final String OBO_ACCESS_TOKEN_2 = 
"eyJ0eXAiOiJKV1QiLCJub25jZSI6IkV2OUJILXNUcGdGYUwxTG5NSEVERGFUWDhVYmpuWmdVSEM4SF9BTmpUaXMiLCJhbGciOiJSUzI1NiIsIng1dCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCIsImtpZCI6ImtnMkxZczJUMENUaklmajRydDZKSXluZW4zOCJ9.eyJhdWQiOiJodHRwczovL2dyYXBoLm1pY3Jvc29mdC5jb20iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC8zMDhkZjA4YS0xMzMyLTRhMTUtYmIwNi0yYWQ3ZThiNzFiY2YvIiwiaWF0IjoxNjA3NTg4NTMwLCJuYmYiOjE2MDc1ODg1MzAsImV4cCI6MTYwNzU5MjQzMCwiYWNjdCI6MCwiYWNyIjoiMSIsImFjcnMiOlsidXJuOnVzZXI6cmVnaXN0ZXJzZWN1cml0eWluZm8iLCJ1cm46bWljcm9zb2Z0OnJlcTEiLCJ1cm46bWljcm9zb2Z0OnJlcTIiLCJ1cm46bWljcm9zb2Z0OnJlcTMiLCJjMSIsImMyIiwiYzMiLCJjNCIsImM1IiwiYzYiLCJjNyIsImM4IiwiYzkiLCJjMTAiLCJjMTEiLCJjMTIiLCJjMTMiLCJjMTQiLCJjMTUiLCJjMTYiLCJjMTciLCJjMTgiLCJjMTkiLCJjMjAiLCJjMjEiLCJjMjIiLCJjMjMiLCJjMjQiLCJjMjUiXSwiYWlvIjoiQVNRQTIvOFJBQUFBcVJFS29VQ0I2aFFoVmQxN0I3ZFhVb1NSbDlDZHpkL01yQjJZcWdRTXJXTT0iLCJhbXIiOlsicHdkIl0sImFwcF9kaXNwbGF5bmFtZSI6IkphdmEtd2ViYXBpIiwiYXBwaWQiOiIyYzQ3YjgzMS1kODM4LTQ2NGYtYTY4NC1mYTc5Y2JkNjRmMjAiLCJhcHBpZGFjciI6IjAiLCJoYXN3aWRzIjoidHJ1ZSIsImlkdHlwIjoidXNlciIsImlwYWRkciI6IjE2Ny4yMjAuMjU1LjExMSIsIm5hbWUiOiJBQURfVEVTVF9HWkgiLCJvaWQiOiJhMzlkMDEwMy0yZjBhLTQ1ZjAtYTEwNy1mOWZhZGVkYmQyNjgiLCJwbGF0ZiI6IjMiLCJwdWlkIjoiMTAwMzIwMDBFNjM0ODE1NyIsInJoIjoiMC5BQUFBaXZDTk1ESVRGVXE3QmlyWDZMY2J6ekc0Unl3NDJFOUdwb1Q2ZWN2V1R5QjFBQ3cuIiwic2NwIjoiVXNlci5SZWFkIFVzZXIuUmVhZC5BbGwgcHJvZmlsZSBvcGVuaWQgZW1haWwiLCJzdWIiOiJPenlvOUZkVzIyMWh0QjBOc0ZnR1VseGg3UnQ1UUFDaExYek9UdDlTQWU0IiwidGVuYW50X3JlZ2lvbl9zY29wZSI6Ik5BIiwidGlkIjoiMzA4ZGYwOGEtMTMzMi00YTE1LWJiMDYtMmFkN2U4YjcxYmNmIiwidW5pcXVlX25hbWUiOiJhYWRfdGVzdF9nemhAY29udG9zb3JnMjIyLm9ubWljcm9zb2Z0LmNvbSIsInVwbiI6ImFhZF90ZXN0X2d6aEBjb250b3NvcmcyMjIub25taWNyb3NvZnQuY29tIiwidXRpIjoiWXVzOU1pY2oxRTZqcW1XbWVPUU5BQSIsInZlciI6IjEuMCIsInhtc19zdCI6eyJzdWIiOiJiN3FKY3kyUUpqUFNOc3lWMTBscFQ3RDRieGVlM1NVQjVmV1p4WHZmZG1vIn0sInhtc190Y2R0IjoxNjAwODQ0ODg0fQ.t9qmH_o7kEPwtr42IBU1mddPiOF_V_CX8IOYW2CJVDwwn0aVCyt9H1vWcV67k5R2Pc29hBZaFJbU6oUFWqhLvzg15mwaI4LNUYrJaXGB-oTFmKFItNjtJ3pi4OsZutvth-EmYAoaeYvqbX2irX7
br_ipMqQ5YLq9gf1F3PfV1EqdMuphZoirFYUhEioEM8DA3Qp6qSWMljXBEFDY4eAzT-h-p_7YQI0XH5R72P_4ERNgQ2j_B9ulCUWOGTO61NY3RU1IVwW-w17GLlCGjsakkf4V40_p8fgK8QArwYWlX-WlCt6fGWqjY2c4gvMoCM7bsqBJ9yREgcHzQZNc9N5Rxw"; private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private InMemoryClientRegistrationRepository clientRegistrationsRepo; private OAuth2AuthorizedClient client; private IAuthenticationResult authenticationResult; private AADOAuth2OboAuthorizedClientRepository authorizedRepo; private JwtAuthenticationToken jwtAuthenticationToken; private MockHttpServletRequest mockHttpServletRequest; @BeforeEach @SuppressWarnings("unchecked") public void setupForAzureAuthorizedClient() throws ExecutionException, InterruptedException { ConfidentialClientApplication confidentialClientApplication = mock(ConfidentialClientApplication.class); CompletableFuture<IAuthenticationResult> acquireTokenFuture = mock(CompletableFuture.class); authenticationResult = mock(IAuthenticationResult.class); when(acquireTokenFuture.get()).thenReturn(authenticationResult); when(authenticationResult.accessToken()).thenReturn(OBO_ACCESS_TOKEN_1); when(confidentialClientApplication.acquireToken(any(OnBehalfOfParameters.class))) .thenReturn(acquireTokenFuture); InMemoryClientRegistrationRepository clientRegistrationsRepo = mock(InMemoryClientRegistrationRepository.class); when(clientRegistrationsRepo.findByRegistrationId(any())).thenReturn(ClientRegistration .withRegistrationId("fake-graph") .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE) .redirectUriTemplate("{baseUrl}/login/oauth2/code/{registrationId}") .tokenUri("https: .jwkSetUri("https: .authorizationUri("https: + ".0/authorize") .scope("User.read") .clientId("2c47b831-d838-464f-a684-fa79cbd64f20").build()); authorizedRepo = new AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo) { @Override ConfidentialClientApplication createApp(ClientRegistration clientRegistration) { if 
("fake-graph".equals(clientRegistration.getRegistrationId())) { return confidentialClientApplication; } else { return null; } } }; final Jwt mockJwt = mock(Jwt.class); when(mockJwt.getTokenValue()).thenReturn("fake-token-value"); when(mockJwt.getSubject()).thenReturn("fake-principal-name"); jwtAuthenticationToken = new JwtAuthenticationToken(mockJwt); mockHttpServletRequest = new MockHttpServletRequest(); client = authorizedRepo.loadAuthorizedClient("fake-graph", jwtAuthenticationToken, mockHttpServletRequest ); } @Test @SuppressWarnings("unchecked") public void testLoadAzureAuthorizedClient() throws ExecutionException, InterruptedException { setupForAzureAuthorizedClient(); Assertions.assertEquals(OBO_ACCESS_TOKEN_1, client.getAccessToken().getTokenValue()); } @Test @SuppressWarnings("unchecked") public void testAuthorizedClientRequestLevelCache() throws ExecutionException, InterruptedException { setupForAzureAuthorizedClient(); Assertions.assertEquals(OBO_ACCESS_TOKEN_1, client.getAccessToken().getTokenValue()); when(authenticationResult.accessToken()).thenReturn(OBO_ACCESS_TOKEN_2); client = authorizedRepo.loadAuthorizedClient("fake-graph", jwtAuthenticationToken, mockHttpServletRequest ); Assertions.assertEquals(OBO_ACCESS_TOKEN_1, client.getAccessToken().getTokenValue()); Assertions.assertNotEquals(OBO_ACCESS_TOKEN_2, client.getAccessToken().getTokenValue()); } @Test @SuppressWarnings("unchecked") public void testLoadNotExistClientRegistration() { AADOAuth2OboAuthorizedClientRepository authorizedRepo = new AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo); final Jwt mockJwt = mock(Jwt.class); OAuth2AuthorizedClient client = authorizedRepo.loadAuthorizedClient("fake-graph-fake", new JwtAuthenticationToken(mockJwt), new MockHttpServletRequest()); Assertions.assertNull(client); } @Test @SuppressWarnings("unchecked") public void testUnsupportedTokenImplementation() { AADOAuth2OboAuthorizedClientRepository authorizedRepo = new 
AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo); PreAuthenticatedAuthenticationToken preToken = mock(PreAuthenticatedAuthenticationToken.class); try { authorizedRepo.loadAuthorizedClient("fake-graph", preToken, new MockHttpServletRequest()); fail("Expected an IllegalStateException to be thrown"); } catch (IllegalStateException e) { } } @Test @SuppressWarnings("unchecked") public void testNotExistClientApplication() { AADOAuth2OboAuthorizedClientRepository authorizedRepo = new AADOAuth2OboAuthorizedClientRepository( clientRegistrationsRepo) { @Override ConfidentialClientApplication createApp(ClientRegistration clientRegistration) { return null; } }; final Jwt mockJwt = mock(Jwt.class); when(mockJwt.getTokenValue()).thenReturn("fake-token-value"); OAuth2AuthorizedClient client = authorizedRepo.loadAuthorizedClient("fake-graph", new JwtAuthenticationToken(mockJwt), new MockHttpServletRequest()); Assertions.assertNull(client); } }
Can we assert the value instead of assert size?
public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); }
assertThat(customScopes).hasSize(1);
public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); } @Test }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test }
Same here.
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); }
assertThat(graphScopes).hasSize(1);
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); 
assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
1. `User.read` -> `User.Read`. `R` should be upper case. 2. `scope` value should have prefix. i.e. it should be `http://xxx/User.Read` 3. It's better use more than one scope to test. Like `xxx.scopes=xxx/User.Read, xxx/User.Write`.
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); }
.withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read")
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); 
assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
Why is it better to use more than one scope to test?
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); }
.withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read")
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); 
assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
sure
public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); }
assertThat(customScopes).hasSize(1);
public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); } @Test }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test }
@saragluna So we can test whether all scopes take effect.
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); }
.withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read")
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); 
assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
But how do we achieve that in the unit test?
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).hasSize(1); }); }
.withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read")
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private static final String AAD_PROPERTY_PREFIX = "azure.activedirectory."; private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( AAD_PROPERTY_PREFIX + "tenant-id=fake-tenant-id", AAD_PROPERTY_PREFIX + "client-id=fake-client-id", AAD_PROPERTY_PREFIX + "client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.graph.scopes=User.read") .withPropertyValues(AAD_PROPERTY_PREFIX + "authorization.custom.scopes=User.read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); 
assertThat(customScopes).isNotNull(); assertThat(graphScopes).hasSize(1); assertThat(customScopes).hasSize(1); }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
@saragluna, > But how do we achieve that in the unit test? We can do like this: ``` assertThat(result, Matchers.arrayContaining(expected)); ``` Refs: https://stackoverflow.com/questions/54439629/hamcrest-matcher-to-compare-two-arrays
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
assertThat(graphScopes).containsOnly("https:
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
get it.
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
assertThat(graphScopes).containsOnly("https:
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
@chenrujun I see what you're trying to say here. If we have no special logic here and it's just spring doing the parsing, so the array of one element or two elements doesn't make that much difference to me. But we could do with two elements here.
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
assertThat(graphScopes).containsOnly("https:
public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization.graph.scopes=https: + ".Read") .withPropertyValues("azure.activedirectory.authorization.custom" + ".scopes=api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); 
assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
This is a fluent model, you can chain these properties.
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient administrationClient = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties queueProperties = administrationClient.getQueue("<<queue-name>>"); System.out.printf("Before update queue properties status :[%s], Max Delivery count :[%d]. %n" , queueProperties.getStatus(), queueProperties.getMaxDeliveryCount()); queueProperties.setStatus(EntityStatus.DISABLED); queueProperties.setMaxDeliveryCount(9); QueueProperties updatedQueueProperties = administrationClient.updateQueue(queueProperties); System.out.printf("After update queue properties status :[%s], Max Delivery count :[%d]. %n" , updatedQueueProperties.getStatus(), updatedQueueProperties.getMaxDeliveryCount()); }
queueProperties.setStatus(EntityStatus.DISABLED);
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
The variable names are really wordy. I think `properties`, `client`, `updatedProperties` versus `queueProperties` and `administrationClient` is enough.
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient administrationClient = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties queueProperties = administrationClient.getQueue("<<queue-name>>"); System.out.printf("Before update queue properties status :[%s], Max Delivery count :[%d]. %n" , queueProperties.getStatus(), queueProperties.getMaxDeliveryCount()); queueProperties.setStatus(EntityStatus.DISABLED); queueProperties.setMaxDeliveryCount(9); QueueProperties updatedQueueProperties = administrationClient.updateQueue(queueProperties); System.out.printf("After update queue properties status :[%s], Max Delivery count :[%d]. %n" , updatedQueueProperties.getStatus(), updatedQueueProperties.getMaxDeliveryCount()); }
QueueProperties queueProperties = administrationClient.getQueue("<<queue-name>>");
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
The spacing is off on this sentence. "Before queue properties status: [%s], max delivery count: [%d].%n" There is one space after a colon and no space before.
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient administrationClient = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties queueProperties = administrationClient.getQueue("<<queue-name>>"); System.out.printf("Before update queue properties status :[%s], Max Delivery count :[%d]. %n" , queueProperties.getStatus(), queueProperties.getMaxDeliveryCount()); queueProperties.setStatus(EntityStatus.DISABLED); queueProperties.setMaxDeliveryCount(9); QueueProperties updatedQueueProperties = administrationClient.updateQueue(queueProperties); System.out.printf("After update queue properties status :[%s], Max Delivery count :[%d]. %n" , updatedQueueProperties.getStatus(), updatedQueueProperties.getMaxDeliveryCount()); }
System.out.printf("Before update queue properties status :[%s], Max Delivery count :[%d]. %n" ,
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
Same here. "After queue properties status: [%s], max delivery count: [%d].%n"
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient administrationClient = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties queueProperties = administrationClient.getQueue("<<queue-name>>"); System.out.printf("Before update queue properties status :[%s], Max Delivery count :[%d]. %n" , queueProperties.getStatus(), queueProperties.getMaxDeliveryCount()); queueProperties.setStatus(EntityStatus.DISABLED); queueProperties.setMaxDeliveryCount(9); QueueProperties updatedQueueProperties = administrationClient.updateQueue(queueProperties); System.out.printf("After update queue properties status :[%s], Max Delivery count :[%d]. %n" , updatedQueueProperties.getStatus(), updatedQueueProperties.getMaxDeliveryCount()); }
System.out.printf("After update queue properties status :[%s], Max Delivery count :[%d]. %n" ,
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
Same as what you're doing right now for other samples, use environment variable so this sample can be run without any changes?
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties status: [%s], Max Delivery count: [%d].%n", properties.getStatus(), properties.getMaxDeliveryCount()); properties.setStatus(EntityStatus.DISABLED).setMaxDeliveryCount(9); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties status: [%s], Max Delivery count: [%d].%n", updatedProperties.getStatus(), updatedProperties.getMaxDeliveryCount()); }
String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
It's atypical to update entity status. I suggest use other properties mentioned here https://docs.microsoft.com/en-us/rest/api/servicebus/update-queue
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties status: [%s], Max Delivery count: [%d].%n", properties.getStatus(), properties.getMaxDeliveryCount()); properties.setStatus(EntityStatus.DISABLED).setMaxDeliveryCount(9); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties status: [%s], Max Delivery count: [%d].%n", updatedProperties.getStatus(), updatedProperties.getMaxDeliveryCount()); }
properties.setStatus(EntityStatus.DISABLED).setMaxDeliveryCount(9);
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
I have another PR where I am converting samples which will use env variables and also setup pipeline to create those resources and run them. This work would have to be don in that PR https://github.com/Azure/azure-sdk-for-java/pull/18185
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties status: [%s], Max Delivery count: [%d].%n", properties.getStatus(), properties.getMaxDeliveryCount()); properties.setStatus(EntityStatus.DISABLED).setMaxDeliveryCount(9); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties status: [%s], Max Delivery count: [%d].%n", updatedProperties.getStatus(), updatedProperties.getMaxDeliveryCount()); }
String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};"
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
We did had a user query, but I agree that it is not regular use case to disable the queue.
public static void main(String[] args) throws InterruptedException { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties status: [%s], Max Delivery count: [%d].%n", properties.getStatus(), properties.getMaxDeliveryCount()); properties.setStatus(EntityStatus.DISABLED).setMaxDeliveryCount(9); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties status: [%s], Max Delivery count: [%d].%n", updatedProperties.getStatus(), updatedProperties.getMaxDeliveryCount()); }
properties.setStatus(EntityStatus.DISABLED).setMaxDeliveryCount(9);
public static void main(String[] args) { String connectionString = "Endpoint={fully-qualified-namespace};SharedAccessKeyName={policy-name};" + "SharedAccessKey={key}"; ServiceBusAdministrationClient client = new ServiceBusAdministrationClientBuilder() .connectionString(connectionString) .buildClient(); QueueProperties properties = client.getQueue("<<queue-name>>"); System.out.printf("Before queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", properties.getLockDuration().getSeconds(), properties.getMaxDeliveryCount()); properties.setMaxDeliveryCount(10).setLockDuration(Duration.ofSeconds(60)); QueueProperties updatedProperties = client.updateQueue(properties); System.out.printf("After queue properties LockDuration: [%d seconds], Max Delivery count: [%d].%n", updatedProperties.getLockDuration().getSeconds(), updatedProperties.getMaxDeliveryCount()); }
class AdministrationClientUpdateQueueSample { /** * Main method to how update queue properties in Service Bus Queue. * * @param args Unused arguments to the program. * @throws InterruptedException If the program is unable to sleep while waiting for the receive to complete. */ }
class AdministrationClientUpdateQueueSample { /** * Main method to show how to update properties of Service Bus Queue. * * @param args Unused arguments to the program. */ }
Hi, @han-gao , we should test method `AADB2CAuthorizationRequestResolver.resolve(...)` instead of only test `AADB2CProperties`
public void testPropertiesBean() { this.contextRunner.run(c -> { final AADB2CProperties properties = c.getBean(AADB2CProperties.class); assertThat(properties).isNotNull(); assertThat(properties.getTenant()).isEqualTo(AADB2CConstants.TEST_TENANT); assertThat(properties.getClientId()).isEqualTo(AADB2CConstants.TEST_CLIENT_ID); assertThat(properties.getClientSecret()).isEqualTo(AADB2CConstants.TEST_CLIENT_SECRET); assertThat(properties.getReplyUrl()).isEqualTo(AADB2CConstants.TEST_REPLY_URL); final String signUpOrSignIn = properties.getUserFlows().getSignUpOrSignIn(); final Object prompt = properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.PROMPT); final String loginHint = String.valueOf(properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.LOGIN_HINT)); assertThat(signUpOrSignIn).isEqualTo(AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME); assertThat(prompt).isEqualTo(AADB2CConstants.TEST_PROMPT); assertThat(loginHint).isEqualTo(AADB2CConstants.TEST_LOGIN_HINT); }); }
assertThat(loginHint).isEqualTo(AADB2CConstants.TEST_LOGIN_HINT);
public void testPropertiesBean() { this.contextRunner.run(c -> { final AADB2CProperties properties = c.getBean(AADB2CProperties.class); assertThat(properties).isNotNull(); assertThat(properties.getTenant()).isEqualTo(AADB2CConstants.TEST_TENANT); assertThat(properties.getClientId()).isEqualTo(AADB2CConstants.TEST_CLIENT_ID); assertThat(properties.getClientSecret()).isEqualTo(AADB2CConstants.TEST_CLIENT_SECRET); assertThat(properties.getReplyUrl()).isEqualTo(AADB2CConstants.TEST_REPLY_URL); final String signUpOrSignIn = properties.getUserFlows().getSignUpOrSignIn(); final Object prompt = properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.PROMPT); final String loginHint = String.valueOf(properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.LOGIN_HINT)); assertThat(signUpOrSignIn).isEqualTo(AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME); assertThat(prompt).isEqualTo(AADB2CConstants.TEST_PROMPT); assertThat(loginHint).isEqualTo(AADB2CConstants.TEST_LOGIN_HINT); }); }
class AADB2CAutoConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AADB2CAutoConfiguration.class)) .withPropertyValues( String.format("%s=%s", AADB2CConstants.TENANT, AADB2CConstants.TEST_TENANT), String.format("%s=%s", AADB2CConstants.CLIENT_ID, AADB2CConstants.TEST_CLIENT_ID), String.format("%s=%s", AADB2CConstants.CLIENT_SECRET, AADB2CConstants.TEST_CLIENT_SECRET), String.format("%s=%s", AADB2CConstants.REPLY_URL, AADB2CConstants.TEST_REPLY_URL), String.format("%s=%s", AADB2CConstants.LOGOUT_SUCCESS_URL, AADB2CConstants.TEST_LOGOUT_SUCCESS_URL), String.format("%s=%s", AADB2CConstants.SIGN_UP_OR_SIGN_IN, AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME), String.format("%s=%s", AADB2CConstants.CONFIG_PROMPT, AADB2CConstants.TEST_PROMPT), String.format("%s=%s", AADB2CConstants.CONFIG_LOGIN_HINT, AADB2CConstants.TEST_LOGIN_HINT) ); @Test public void testAutoConfigurationBean() { this.contextRunner.run(c -> { final AADB2CAutoConfiguration config = c.getBean(AADB2CAutoConfiguration.class); assertThat(config).isNotNull(); }); } @Test @Test public void testAADB2CAuthorizationRequestResolverBean() { this.contextRunner.run(c -> { final AADB2CAuthorizationRequestResolver resolver = c.getBean(AADB2CAuthorizationRequestResolver.class); assertThat(resolver).isNotNull(); }); } @Test public void testLogoutSuccessHandlerBean() { this.contextRunner.run(c -> { final AADB2CLogoutSuccessHandler handler = c.getBean(AADB2CLogoutSuccessHandler.class); assertThat(handler).isNotNull(); }); } @Test public void testFilterBean() { this.contextRunner.run(c -> { final ClientRegistrationRepository repository = c.getBean(ClientRegistrationRepository.class); assertThat(repository).isNotNull(); }); } }
class AADB2CAutoConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AADB2CAutoConfiguration.class)) .withPropertyValues( String.format("%s=%s", AADB2CConstants.TENANT, AADB2CConstants.TEST_TENANT), String.format("%s=%s", AADB2CConstants.CLIENT_ID, AADB2CConstants.TEST_CLIENT_ID), String.format("%s=%s", AADB2CConstants.CLIENT_SECRET, AADB2CConstants.TEST_CLIENT_SECRET), String.format("%s=%s", AADB2CConstants.REPLY_URL, AADB2CConstants.TEST_REPLY_URL), String.format("%s=%s", AADB2CConstants.LOGOUT_SUCCESS_URL, AADB2CConstants.TEST_LOGOUT_SUCCESS_URL), String.format("%s=%s", AADB2CConstants.SIGN_UP_OR_SIGN_IN, AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME), String.format("%s=%s", AADB2CConstants.CONFIG_PROMPT, AADB2CConstants.TEST_PROMPT), String.format("%s=%s", AADB2CConstants.CONFIG_LOGIN_HINT, AADB2CConstants.TEST_LOGIN_HINT) ); @Test public void testAutoConfigurationBean() { this.contextRunner.run(c -> { final AADB2CAutoConfiguration config = c.getBean(AADB2CAutoConfiguration.class); assertThat(config).isNotNull(); }); } @Test @Test public void testAADB2CAuthorizationRequestResolverBean() { this.contextRunner.run(c -> { final AADB2CAuthorizationRequestResolver resolver = c.getBean(AADB2CAuthorizationRequestResolver.class); assertThat(resolver).isNotNull(); }); } @Test public void testLogoutSuccessHandlerBean() { this.contextRunner.run(c -> { final AADB2CLogoutSuccessHandler handler = c.getBean(AADB2CLogoutSuccessHandler.class); assertThat(handler).isNotNull(); }); } @Test public void testFilterBean() { this.contextRunner.run(c -> { final ClientRegistrationRepository repository = c.getBean(ClientRegistrationRepository.class); assertThat(repository).isNotNull(); }); } }
Seems it already exist in AADB2CAuthorizationRequestResolverTest.java. OK, I'll merge it.
public void testPropertiesBean() { this.contextRunner.run(c -> { final AADB2CProperties properties = c.getBean(AADB2CProperties.class); assertThat(properties).isNotNull(); assertThat(properties.getTenant()).isEqualTo(AADB2CConstants.TEST_TENANT); assertThat(properties.getClientId()).isEqualTo(AADB2CConstants.TEST_CLIENT_ID); assertThat(properties.getClientSecret()).isEqualTo(AADB2CConstants.TEST_CLIENT_SECRET); assertThat(properties.getReplyUrl()).isEqualTo(AADB2CConstants.TEST_REPLY_URL); final String signUpOrSignIn = properties.getUserFlows().getSignUpOrSignIn(); final Object prompt = properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.PROMPT); final String loginHint = String.valueOf(properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.LOGIN_HINT)); assertThat(signUpOrSignIn).isEqualTo(AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME); assertThat(prompt).isEqualTo(AADB2CConstants.TEST_PROMPT); assertThat(loginHint).isEqualTo(AADB2CConstants.TEST_LOGIN_HINT); }); }
assertThat(loginHint).isEqualTo(AADB2CConstants.TEST_LOGIN_HINT);
public void testPropertiesBean() { this.contextRunner.run(c -> { final AADB2CProperties properties = c.getBean(AADB2CProperties.class); assertThat(properties).isNotNull(); assertThat(properties.getTenant()).isEqualTo(AADB2CConstants.TEST_TENANT); assertThat(properties.getClientId()).isEqualTo(AADB2CConstants.TEST_CLIENT_ID); assertThat(properties.getClientSecret()).isEqualTo(AADB2CConstants.TEST_CLIENT_SECRET); assertThat(properties.getReplyUrl()).isEqualTo(AADB2CConstants.TEST_REPLY_URL); final String signUpOrSignIn = properties.getUserFlows().getSignUpOrSignIn(); final Object prompt = properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.PROMPT); final String loginHint = String.valueOf(properties.getAuthenticateAdditionalParameters().get(AADB2CConstants.LOGIN_HINT)); assertThat(signUpOrSignIn).isEqualTo(AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME); assertThat(prompt).isEqualTo(AADB2CConstants.TEST_PROMPT); assertThat(loginHint).isEqualTo(AADB2CConstants.TEST_LOGIN_HINT); }); }
class AADB2CAutoConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AADB2CAutoConfiguration.class)) .withPropertyValues( String.format("%s=%s", AADB2CConstants.TENANT, AADB2CConstants.TEST_TENANT), String.format("%s=%s", AADB2CConstants.CLIENT_ID, AADB2CConstants.TEST_CLIENT_ID), String.format("%s=%s", AADB2CConstants.CLIENT_SECRET, AADB2CConstants.TEST_CLIENT_SECRET), String.format("%s=%s", AADB2CConstants.REPLY_URL, AADB2CConstants.TEST_REPLY_URL), String.format("%s=%s", AADB2CConstants.LOGOUT_SUCCESS_URL, AADB2CConstants.TEST_LOGOUT_SUCCESS_URL), String.format("%s=%s", AADB2CConstants.SIGN_UP_OR_SIGN_IN, AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME), String.format("%s=%s", AADB2CConstants.CONFIG_PROMPT, AADB2CConstants.TEST_PROMPT), String.format("%s=%s", AADB2CConstants.CONFIG_LOGIN_HINT, AADB2CConstants.TEST_LOGIN_HINT) ); @Test public void testAutoConfigurationBean() { this.contextRunner.run(c -> { final AADB2CAutoConfiguration config = c.getBean(AADB2CAutoConfiguration.class); assertThat(config).isNotNull(); }); } @Test @Test public void testAADB2CAuthorizationRequestResolverBean() { this.contextRunner.run(c -> { final AADB2CAuthorizationRequestResolver resolver = c.getBean(AADB2CAuthorizationRequestResolver.class); assertThat(resolver).isNotNull(); }); } @Test public void testLogoutSuccessHandlerBean() { this.contextRunner.run(c -> { final AADB2CLogoutSuccessHandler handler = c.getBean(AADB2CLogoutSuccessHandler.class); assertThat(handler).isNotNull(); }); } @Test public void testFilterBean() { this.contextRunner.run(c -> { final ClientRegistrationRepository repository = c.getBean(ClientRegistrationRepository.class); assertThat(repository).isNotNull(); }); } }
class AADB2CAutoConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withConfiguration(AutoConfigurations.of(AADB2CAutoConfiguration.class)) .withPropertyValues( String.format("%s=%s", AADB2CConstants.TENANT, AADB2CConstants.TEST_TENANT), String.format("%s=%s", AADB2CConstants.CLIENT_ID, AADB2CConstants.TEST_CLIENT_ID), String.format("%s=%s", AADB2CConstants.CLIENT_SECRET, AADB2CConstants.TEST_CLIENT_SECRET), String.format("%s=%s", AADB2CConstants.REPLY_URL, AADB2CConstants.TEST_REPLY_URL), String.format("%s=%s", AADB2CConstants.LOGOUT_SUCCESS_URL, AADB2CConstants.TEST_LOGOUT_SUCCESS_URL), String.format("%s=%s", AADB2CConstants.SIGN_UP_OR_SIGN_IN, AADB2CConstants.TEST_SIGN_UP_OR_IN_NAME), String.format("%s=%s", AADB2CConstants.CONFIG_PROMPT, AADB2CConstants.TEST_PROMPT), String.format("%s=%s", AADB2CConstants.CONFIG_LOGIN_HINT, AADB2CConstants.TEST_LOGIN_HINT) ); @Test public void testAutoConfigurationBean() { this.contextRunner.run(c -> { final AADB2CAutoConfiguration config = c.getBean(AADB2CAutoConfiguration.class); assertThat(config).isNotNull(); }); } @Test @Test public void testAADB2CAuthorizationRequestResolverBean() { this.contextRunner.run(c -> { final AADB2CAuthorizationRequestResolver resolver = c.getBean(AADB2CAuthorizationRequestResolver.class); assertThat(resolver).isNotNull(); }); } @Test public void testLogoutSuccessHandlerBean() { this.contextRunner.run(c -> { final AADB2CLogoutSuccessHandler handler = c.getBean(AADB2CLogoutSuccessHandler.class); assertThat(handler).isNotNull(); }); } @Test public void testFilterBean() { this.contextRunner.run(c -> { final ClientRegistrationRepository repository = c.getBean(ClientRegistrationRepository.class); assertThat(repository).isNotNull(); }); } }
Can we reuse this part of code?
public void testRefreshTokenConverter() { final String clientId = System.getenv(AAD_MULTI_TENANT_CLIENT_ID); final String clientSecret = System.getenv(AAD_MULTI_TENANT_CLIENT_SECRET); try (AppRunner app = new AppRunner(DumbApp.class)) { ChromeOptions options = new ChromeOptions(); options.addArguments("--incognito"); options.addArguments("--headless"); options.addArguments("--no-sandbox"); options.addArguments("--disable-dev-shm-usage"); WebDriver driver = new ChromeDriver(options); WebDriverWait wait = new WebDriverWait(driver, 10); app.property("azure.activedirectory.client-id", clientId); app.property("azure.activedirectory.client-secret", clientSecret); app.property("azure.activedirectory.user-group.allowed-groups", "group1,group2"); app.property("azure.activedirectory.authorization.office.scopes", "https: app.start(); try { driver.get(app.root() + "api/getAccessToken"); wait.until(presenceOfElementLocated(By.name("loginfmt"))) .sendKeys(System.getenv(AAD_USER_NAME_1) + Keys.ENTER); Thread.sleep(10000); driver.findElement(By.name("passwd")) .sendKeys(System.getenv(AAD_USER_PASSWORD_1) + Keys.ENTER); Thread.sleep(10000); driver.findElement(By.cssSelector("input[type='submit']")).click(); Thread.sleep(10000); Assert.assertTrue(driver.findElement(By.tagName("body")).getText().indexOf("profile") < 0); Assert.assertTrue(driver.findElement(By.tagName("body")).getText().indexOf("https: } catch (InterruptedException e) { throw new RuntimeException(e); } finally { driver.quit(); } } }
app.start();
public void testRefreshTokenConverter() { try (AppRunner app = new AppRunner(DumbApp.class)) { OAuthLoginUtils.addProperty(app); app.property("azure.activedirectory.authorization.office.scopes", "https: List<String> endPoints = new ArrayList<>(); endPoints.add("api/accessTokenScopes"); List<String> result = OAuthLoginUtils.get(app , endPoints); Assert.assertTrue(result.get(0).indexOf("profile") < 0); Assert.assertTrue(result.get(0).indexOf("https: } }
class AADWebAppRefreshTokenConverterIT { private final RestTemplate restTemplate = new RestTemplate(); static { final String directory = "src/test/resources/driver/"; final String chromedriverLinux = "chromedriver_linux64"; final String chromedriverWin32 = "chromedriver_win32.exe"; final String chromedriverMac = "chromedriver_mac64"; String osName = System.getProperty("os.name").toLowerCase(); Process process = null; try { File dir = new File(directory); if (Pattern.matches("linux.*", osName)) { process = Runtime.getRuntime().exec("chmod +x " + chromedriverLinux, null, dir); process.waitFor(); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, directory + chromedriverLinux); } else if (Pattern.matches("windows.*", osName)) { System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, directory + chromedriverWin32); } else if (Pattern.matches("mac.*", osName)) { process = Runtime.getRuntime().exec("chmod +x " + chromedriverMac, null, dir); process.waitFor(); System.setProperty(ChromeDriverService.CHROME_DRIVER_EXE_PROPERTY, directory + chromedriverMac); } else { throw new IllegalStateException("Can not recognize osName. 
osName = " + System.getProperty("os.name")); } } catch (IllegalStateException e) { throw e; } catch (InterruptedException | IOException e) { throw new RuntimeException(e); } finally { if (process != null) { process.destroy(); } } } @Test @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @RestController public static class DumbApp extends AzureOAuth2Configuration { @Autowired private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService; @Override protected void configure(HttpSecurity http) throws Exception { super.configure(http); http.authorizeRequests() .anyRequest().authenticated() .and() .oauth2Login() .userInfoEndpoint() .oidcUserService(oidcUserService); } @GetMapping(value = "api/getAccessToken") public Set<String> groupsCount( @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } } }
class AADWebAppRefreshTokenConverterIT { @Test @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @RestController public static class DumbApp { @GetMapping(value = "api/accessTokenScopes") public Set<String> accessTokenScopes( @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } } }
Use `Map<String, String>` instead of `List<String>` to make it easier to read.
public void loginTest() { try (AppRunner app = new AppRunner(DumbApp.class)) { OAuthLoginUtils.addProperty(app); List<String> endPoints = new ArrayList<>(); endPoints.add("api/home"); endPoints.add("api/group1"); endPoints.add("api/status403"); List<String> result = OAuthLoginUtils.get(app , endPoints); Assert.assertEquals("home", result.get(0)); Assert.assertEquals("group1", result.get(1)); Assert.assertNotEquals("error", result.get(2)); } }
List<String> result = OAuthLoginUtils.get(app , endPoints);
public void loginTest() { try (AppRunner app = new AppRunner(DumbApp.class)) { OAuthLoginUtils.addProperty(app); List<String> endPoints = new ArrayList<>(); endPoints.add("api/home"); endPoints.add("api/group1"); endPoints.add("api/status403"); List<String> result = OAuthLoginUtils.get(app , endPoints); Assert.assertEquals("home", result.get(0)); Assert.assertEquals("group1", result.get(1)); Assert.assertNotEquals("error", result.get(2)); } }
class AADLoginIT { private static final Logger LOGGER = LoggerFactory.getLogger(AADLoginIT.class); @Test @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @RestController public static class DumbApp extends WebSecurityConfigurerAdapter { @Autowired private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService; @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest().authenticated() .and() .oauth2Login() .userInfoEndpoint() .oidcUserService(oidcUserService); } @PreAuthorize("hasRole('ROLE_group1')") @GetMapping(value = "/api/group1") public ResponseEntity<String> group1() { return ResponseEntity.ok("group1"); } @GetMapping(value = "/api/home") public ResponseEntity<String> home(Principal principal) { LOGGER.info(((OAuth2AuthenticationToken) principal).getAuthorities().toString()); return ResponseEntity.ok("home"); } @PreAuthorize("hasRole('ROLE_fdsaliieammQiovlikIOWssIEURsafjFelasdfe')") @GetMapping(value = "/api/status403") public ResponseEntity<String> status403() { return ResponseEntity.ok("error"); } } }
class AADLoginIT { private static final Logger LOGGER = LoggerFactory.getLogger(AADLoginIT.class); @Test @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @RestController public static class DumbApp extends WebSecurityConfigurerAdapter { @Autowired private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService; @Override protected void configure(HttpSecurity http) throws Exception { http.authorizeRequests() .anyRequest().authenticated() .and() .oauth2Login() .userInfoEndpoint() .oidcUserService(oidcUserService); } @PreAuthorize("hasRole('ROLE_group1')") @GetMapping(value = "/api/group1") public ResponseEntity<String> group1() { return ResponseEntity.ok("group1"); } @GetMapping(value = "/api/home") public ResponseEntity<String> home(Principal principal) { LOGGER.info(((OAuth2AuthenticationToken) principal).getAuthorities().toString()); return ResponseEntity.ok("home"); } @PreAuthorize("hasRole('ROLE_fdsaliieammQiovlikIOWssIEURsafjFelasdfe')") @GetMapping(value = "/api/status403") public ResponseEntity<String> status403() { return ResponseEntity.ok("error"); } } }
doOnNext or doOnComplete
public Mono<VirtualNetworkGatewayConnection> createResourceAsync() { beforeCreating(); return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel()) .map(innerToFluentMap(this)) .flatMap(virtualNetworkGatewayConnection -> { if (updateSharedKey == null) { return Mono.just(virtualNetworkGatewayConnection); } return myManager.serviceClient().getVirtualNetworkGatewayConnections() .setSharedKeyAsync( this.resourceGroupName(), this.name(), new ConnectionSharedKeyInner().withValue(updateSharedKey)) .map(inner -> { updateSharedKey = null; return inner; }) .then(myManager.serviceClient().getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .map(innerToFluentMap(this))); }); }
})
public Mono<VirtualNetworkGatewayConnection> createResourceAsync() { beforeCreating(); return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel()) .map(innerToFluentMap(this)) .flatMap(virtualNetworkGatewayConnection -> { if (updateSharedKey == null) { return Mono.just(virtualNetworkGatewayConnection); } return myManager.serviceClient().getVirtualNetworkGatewayConnections() .setSharedKeyAsync( this.resourceGroupName(), this.name(), new ConnectionSharedKeyInner().withValue(updateSharedKey)) .doOnSuccess(inner -> { updateSharedKey = null; }) .then(myManager.serviceClient().getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .map(innerToFluentMap(this))); }); }
class VirtualNetworkGatewayConnectionImpl extends GroupableResourceImpl< VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnectionInner, VirtualNetworkGatewayConnectionImpl, NetworkManager> implements VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnection.Definition, VirtualNetworkGatewayConnection.Update, AppliableWithTags<VirtualNetworkGatewayConnection> { private final VirtualNetworkGateway parent; private String updateSharedKey; VirtualNetworkGatewayConnectionImpl( String name, VirtualNetworkGatewayImpl parent, VirtualNetworkGatewayConnectionInner inner) { super(name, inner, parent.manager()); this.parent = parent; } @Override public VirtualNetworkGateway parent() { return parent; } @Override public String authorizationKey() { return innerModel().authorizationKey(); } @Override public String virtualNetworkGateway1Id() { if (innerModel().virtualNetworkGateway1() == null) { return null; } return innerModel().virtualNetworkGateway1().id(); } @Override public String virtualNetworkGateway2Id() { if (innerModel().virtualNetworkGateway2() == null) { return null; } return innerModel().virtualNetworkGateway2().id(); } @Override public String localNetworkGateway2Id() { if (innerModel().localNetworkGateway2() == null) { return null; } return innerModel().localNetworkGateway2().id(); } @Override public VirtualNetworkGatewayConnectionType connectionType() { return innerModel().connectionType(); } @Override public int routingWeight() { return ResourceManagerUtils.toPrimitiveInt(innerModel().routingWeight()); } @Override public String sharedKey() { return innerModel().sharedKey(); } @Override public VirtualNetworkGatewayConnectionStatus connectionStatus() { return innerModel().connectionStatus(); } @Override public Collection<TunnelConnectionHealth> tunnelConnectionStatus() { return Collections.unmodifiableCollection(innerModel().tunnelConnectionStatus()); } @Override public long egressBytesTransferred() { return 
ResourceManagerUtils.toPrimitiveLong(innerModel().egressBytesTransferred()); } @Override public long ingressBytesTransferred() { return ResourceManagerUtils.toPrimitiveLong(innerModel().ingressBytesTransferred()); } @Override public String peerId() { return innerModel().peer() == null ? null : innerModel().peer().id(); } @Override public boolean isBgpEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().enableBgp()); } @Override public boolean usePolicyBasedTrafficSelectors() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().usePolicyBasedTrafficSelectors()); } @Override public Collection<IpsecPolicy> ipsecPolicies() { return Collections.unmodifiableCollection(innerModel().ipsecPolicies()); } @Override public String provisioningState() { return innerModel().provisioningState().toString(); } @Override public VirtualNetworkGatewayConnectionImpl withSiteToSite() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.IPSEC); return this; } @Override public VirtualNetworkGatewayConnectionImpl withVNetToVNet() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.VNET2VNET); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(String circuitId) { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.EXPRESS_ROUTE); innerModel().withPeer(new SubResource().withId(circuitId)); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(ExpressRouteCircuit circuit) { return withExpressRoute(circuit.id()); } @Override public VirtualNetworkGatewayConnectionImpl withLocalNetworkGateway(LocalNetworkGateway localNetworkGateway) { innerModel().withLocalNetworkGateway2(localNetworkGateway.innerModel()); return this; } @Override public VirtualNetworkGatewayConnectionImpl withSecondVirtualNetworkGateway( VirtualNetworkGateway virtualNetworkGateway2) { innerModel().withVirtualNetworkGateway2(virtualNetworkGateway2.innerModel()); return this; } @Override 
public VirtualNetworkGatewayConnectionImpl withSharedKey(String sharedKey) { if (isInCreateMode()) { innerModel().withSharedKey(sharedKey); } else { updateSharedKey = sharedKey; } return this; } @Override public VirtualNetworkGatewayConnectionImpl withBgp() { innerModel().withEnableBgp(true); return this; } @Override public VirtualNetworkGatewayConnectionImpl withoutBgp() { innerModel().withEnableBgp(false); return this; } @Override public VirtualNetworkGatewayConnectionImpl withAuthorization(String authorizationKey) { innerModel().withAuthorizationKey(authorizationKey); return this; } @Override protected Mono<VirtualNetworkGatewayConnectionInner> getInnerAsync() { return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(resourceGroupName(), name()); } @Override private void beforeCreating() { innerModel().withVirtualNetworkGateway1(parent.innerModel()); } @Override public VirtualNetworkGatewayConnectionImpl updateTags() { return this; } @Override public VirtualNetworkGatewayConnection applyTags() { return applyTagsAsync().block(); } @Override public Mono<VirtualNetworkGatewayConnection> applyTagsAsync() { return this .manager() .serviceClient() .getVirtualNetworkGatewayConnections() .updateTagsAsync(resourceGroupName(), name(), innerModel().tags()) .flatMap(inner -> refreshAsync()); } }
class VirtualNetworkGatewayConnectionImpl extends GroupableResourceImpl< VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnectionInner, VirtualNetworkGatewayConnectionImpl, NetworkManager> implements VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnection.Definition, VirtualNetworkGatewayConnection.Update, AppliableWithTags<VirtualNetworkGatewayConnection> { private final VirtualNetworkGateway parent; private String updateSharedKey; VirtualNetworkGatewayConnectionImpl( String name, VirtualNetworkGatewayImpl parent, VirtualNetworkGatewayConnectionInner inner) { super(name, inner, parent.manager()); this.parent = parent; } @Override public VirtualNetworkGateway parent() { return parent; } @Override public String authorizationKey() { return innerModel().authorizationKey(); } @Override public String virtualNetworkGateway1Id() { if (innerModel().virtualNetworkGateway1() == null) { return null; } return innerModel().virtualNetworkGateway1().id(); } @Override public String virtualNetworkGateway2Id() { if (innerModel().virtualNetworkGateway2() == null) { return null; } return innerModel().virtualNetworkGateway2().id(); } @Override public String localNetworkGateway2Id() { if (innerModel().localNetworkGateway2() == null) { return null; } return innerModel().localNetworkGateway2().id(); } @Override public VirtualNetworkGatewayConnectionType connectionType() { return innerModel().connectionType(); } @Override public int routingWeight() { return ResourceManagerUtils.toPrimitiveInt(innerModel().routingWeight()); } @Override public String sharedKey() { return innerModel().sharedKey(); } @Override public VirtualNetworkGatewayConnectionStatus connectionStatus() { return innerModel().connectionStatus(); } @Override public Collection<TunnelConnectionHealth> tunnelConnectionStatus() { return Collections.unmodifiableCollection(innerModel().tunnelConnectionStatus()); } @Override public long egressBytesTransferred() { return 
ResourceManagerUtils.toPrimitiveLong(innerModel().egressBytesTransferred()); } @Override public long ingressBytesTransferred() { return ResourceManagerUtils.toPrimitiveLong(innerModel().ingressBytesTransferred()); } @Override public String peerId() { return innerModel().peer() == null ? null : innerModel().peer().id(); } @Override public boolean isBgpEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().enableBgp()); } @Override public boolean usePolicyBasedTrafficSelectors() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().usePolicyBasedTrafficSelectors()); } @Override public Collection<IpsecPolicy> ipsecPolicies() { return Collections.unmodifiableCollection(innerModel().ipsecPolicies()); } @Override public String provisioningState() { return innerModel().provisioningState().toString(); } @Override public VirtualNetworkGatewayConnectionImpl withSiteToSite() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.IPSEC); return this; } @Override public VirtualNetworkGatewayConnectionImpl withVNetToVNet() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.VNET2VNET); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(String circuitId) { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.EXPRESS_ROUTE); innerModel().withPeer(new SubResource().withId(circuitId)); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(ExpressRouteCircuit circuit) { return withExpressRoute(circuit.id()); } @Override public VirtualNetworkGatewayConnectionImpl withLocalNetworkGateway(LocalNetworkGateway localNetworkGateway) { innerModel().withLocalNetworkGateway2(localNetworkGateway.innerModel()); return this; } @Override public VirtualNetworkGatewayConnectionImpl withSecondVirtualNetworkGateway( VirtualNetworkGateway virtualNetworkGateway2) { innerModel().withVirtualNetworkGateway2(virtualNetworkGateway2.innerModel()); return this; } @Override 
public VirtualNetworkGatewayConnectionImpl withSharedKey(String sharedKey) { if (isInCreateMode()) { innerModel().withSharedKey(sharedKey); } else { updateSharedKey = sharedKey; } return this; } @Override public VirtualNetworkGatewayConnectionImpl withBgp() { innerModel().withEnableBgp(true); return this; } @Override public VirtualNetworkGatewayConnectionImpl withoutBgp() { innerModel().withEnableBgp(false); return this; } @Override public VirtualNetworkGatewayConnectionImpl withAuthorization(String authorizationKey) { innerModel().withAuthorizationKey(authorizationKey); return this; } @Override protected Mono<VirtualNetworkGatewayConnectionInner> getInnerAsync() { return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(resourceGroupName(), name()); } @Override private void beforeCreating() { innerModel().withVirtualNetworkGateway1(parent.innerModel()); } @Override public VirtualNetworkGatewayConnectionImpl updateTags() { return this; } @Override public VirtualNetworkGatewayConnection applyTags() { return applyTagsAsync().block(); } @Override public Mono<VirtualNetworkGatewayConnection> applyTagsAsync() { return this .manager() .serviceClient() .getVirtualNetworkGatewayConnections() .updateTagsAsync(resourceGroupName(), name(), innerModel().tags()) .flatMap(inner -> refreshAsync()); } }
Better not change to doOnSuccess. Otherwise, the `then` failure will result a updateShareKey next time.
public Mono<VirtualNetworkGatewayConnection> createResourceAsync() { beforeCreating(); return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel()) .map(innerToFluentMap(this)) .flatMap(virtualNetworkGatewayConnection -> { if (updateSharedKey == null) { return Mono.just(virtualNetworkGatewayConnection); } return myManager.serviceClient().getVirtualNetworkGatewayConnections() .setSharedKeyAsync( this.resourceGroupName(), this.name(), new ConnectionSharedKeyInner().withValue(updateSharedKey)) .then(myManager.serviceClient().getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .map(innerToFluentMap(this))) .doOnSuccess(connection -> { updateSharedKey = null; }); }); }
});
public Mono<VirtualNetworkGatewayConnection> createResourceAsync() { beforeCreating(); return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .createOrUpdateAsync(this.resourceGroupName(), this.name(), this.innerModel()) .map(innerToFluentMap(this)) .flatMap(virtualNetworkGatewayConnection -> { if (updateSharedKey == null) { return Mono.just(virtualNetworkGatewayConnection); } return myManager.serviceClient().getVirtualNetworkGatewayConnections() .setSharedKeyAsync( this.resourceGroupName(), this.name(), new ConnectionSharedKeyInner().withValue(updateSharedKey)) .doOnSuccess(inner -> { updateSharedKey = null; }) .then(myManager.serviceClient().getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(this.resourceGroupName(), this.name()) .map(innerToFluentMap(this))); }); }
class VirtualNetworkGatewayConnectionImpl extends GroupableResourceImpl< VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnectionInner, VirtualNetworkGatewayConnectionImpl, NetworkManager> implements VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnection.Definition, VirtualNetworkGatewayConnection.Update, AppliableWithTags<VirtualNetworkGatewayConnection> { private final VirtualNetworkGateway parent; private String updateSharedKey; VirtualNetworkGatewayConnectionImpl( String name, VirtualNetworkGatewayImpl parent, VirtualNetworkGatewayConnectionInner inner) { super(name, inner, parent.manager()); this.parent = parent; } @Override public VirtualNetworkGateway parent() { return parent; } @Override public String authorizationKey() { return innerModel().authorizationKey(); } @Override public String virtualNetworkGateway1Id() { if (innerModel().virtualNetworkGateway1() == null) { return null; } return innerModel().virtualNetworkGateway1().id(); } @Override public String virtualNetworkGateway2Id() { if (innerModel().virtualNetworkGateway2() == null) { return null; } return innerModel().virtualNetworkGateway2().id(); } @Override public String localNetworkGateway2Id() { if (innerModel().localNetworkGateway2() == null) { return null; } return innerModel().localNetworkGateway2().id(); } @Override public VirtualNetworkGatewayConnectionType connectionType() { return innerModel().connectionType(); } @Override public int routingWeight() { return ResourceManagerUtils.toPrimitiveInt(innerModel().routingWeight()); } @Override public String sharedKey() { return innerModel().sharedKey(); } @Override public VirtualNetworkGatewayConnectionStatus connectionStatus() { return innerModel().connectionStatus(); } @Override public Collection<TunnelConnectionHealth> tunnelConnectionStatus() { return Collections.unmodifiableCollection(innerModel().tunnelConnectionStatus()); } @Override public long egressBytesTransferred() { return 
ResourceManagerUtils.toPrimitiveLong(innerModel().egressBytesTransferred()); } @Override public long ingressBytesTransferred() { return ResourceManagerUtils.toPrimitiveLong(innerModel().ingressBytesTransferred()); } @Override public String peerId() { return innerModel().peer() == null ? null : innerModel().peer().id(); } @Override public boolean isBgpEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().enableBgp()); } @Override public boolean usePolicyBasedTrafficSelectors() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().usePolicyBasedTrafficSelectors()); } @Override public Collection<IpsecPolicy> ipsecPolicies() { return Collections.unmodifiableCollection(innerModel().ipsecPolicies()); } @Override public String provisioningState() { return innerModel().provisioningState().toString(); } @Override public VirtualNetworkGatewayConnectionImpl withSiteToSite() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.IPSEC); return this; } @Override public VirtualNetworkGatewayConnectionImpl withVNetToVNet() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.VNET2VNET); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(String circuitId) { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.EXPRESS_ROUTE); innerModel().withPeer(new SubResource().withId(circuitId)); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(ExpressRouteCircuit circuit) { return withExpressRoute(circuit.id()); } @Override public VirtualNetworkGatewayConnectionImpl withLocalNetworkGateway(LocalNetworkGateway localNetworkGateway) { innerModel().withLocalNetworkGateway2(localNetworkGateway.innerModel()); return this; } @Override public VirtualNetworkGatewayConnectionImpl withSecondVirtualNetworkGateway( VirtualNetworkGateway virtualNetworkGateway2) { innerModel().withVirtualNetworkGateway2(virtualNetworkGateway2.innerModel()); return this; } @Override 
public VirtualNetworkGatewayConnectionImpl withSharedKey(String sharedKey) { if (isInCreateMode()) { innerModel().withSharedKey(sharedKey); } else { updateSharedKey = sharedKey; } return this; } @Override public VirtualNetworkGatewayConnectionImpl withBgp() { innerModel().withEnableBgp(true); return this; } @Override public VirtualNetworkGatewayConnectionImpl withoutBgp() { innerModel().withEnableBgp(false); return this; } @Override public VirtualNetworkGatewayConnectionImpl withAuthorization(String authorizationKey) { innerModel().withAuthorizationKey(authorizationKey); return this; } @Override protected Mono<VirtualNetworkGatewayConnectionInner> getInnerAsync() { return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(resourceGroupName(), name()); } @Override private void beforeCreating() { innerModel().withVirtualNetworkGateway1(parent.innerModel()); } @Override public VirtualNetworkGatewayConnectionImpl updateTags() { return this; } @Override public VirtualNetworkGatewayConnection applyTags() { return applyTagsAsync().block(); } @Override public Mono<VirtualNetworkGatewayConnection> applyTagsAsync() { return this .manager() .serviceClient() .getVirtualNetworkGatewayConnections() .updateTagsAsync(resourceGroupName(), name(), innerModel().tags()) .flatMap(inner -> refreshAsync()); } }
class VirtualNetworkGatewayConnectionImpl extends GroupableResourceImpl< VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnectionInner, VirtualNetworkGatewayConnectionImpl, NetworkManager> implements VirtualNetworkGatewayConnection, VirtualNetworkGatewayConnection.Definition, VirtualNetworkGatewayConnection.Update, AppliableWithTags<VirtualNetworkGatewayConnection> { private final VirtualNetworkGateway parent; private String updateSharedKey; VirtualNetworkGatewayConnectionImpl( String name, VirtualNetworkGatewayImpl parent, VirtualNetworkGatewayConnectionInner inner) { super(name, inner, parent.manager()); this.parent = parent; } @Override public VirtualNetworkGateway parent() { return parent; } @Override public String authorizationKey() { return innerModel().authorizationKey(); } @Override public String virtualNetworkGateway1Id() { if (innerModel().virtualNetworkGateway1() == null) { return null; } return innerModel().virtualNetworkGateway1().id(); } @Override public String virtualNetworkGateway2Id() { if (innerModel().virtualNetworkGateway2() == null) { return null; } return innerModel().virtualNetworkGateway2().id(); } @Override public String localNetworkGateway2Id() { if (innerModel().localNetworkGateway2() == null) { return null; } return innerModel().localNetworkGateway2().id(); } @Override public VirtualNetworkGatewayConnectionType connectionType() { return innerModel().connectionType(); } @Override public int routingWeight() { return ResourceManagerUtils.toPrimitiveInt(innerModel().routingWeight()); } @Override public String sharedKey() { return innerModel().sharedKey(); } @Override public VirtualNetworkGatewayConnectionStatus connectionStatus() { return innerModel().connectionStatus(); } @Override public Collection<TunnelConnectionHealth> tunnelConnectionStatus() { return Collections.unmodifiableCollection(innerModel().tunnelConnectionStatus()); } @Override public long egressBytesTransferred() { return 
ResourceManagerUtils.toPrimitiveLong(innerModel().egressBytesTransferred()); } @Override public long ingressBytesTransferred() { return ResourceManagerUtils.toPrimitiveLong(innerModel().ingressBytesTransferred()); } @Override public String peerId() { return innerModel().peer() == null ? null : innerModel().peer().id(); } @Override public boolean isBgpEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().enableBgp()); } @Override public boolean usePolicyBasedTrafficSelectors() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().usePolicyBasedTrafficSelectors()); } @Override public Collection<IpsecPolicy> ipsecPolicies() { return Collections.unmodifiableCollection(innerModel().ipsecPolicies()); } @Override public String provisioningState() { return innerModel().provisioningState().toString(); } @Override public VirtualNetworkGatewayConnectionImpl withSiteToSite() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.IPSEC); return this; } @Override public VirtualNetworkGatewayConnectionImpl withVNetToVNet() { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.VNET2VNET); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(String circuitId) { innerModel().withConnectionType(VirtualNetworkGatewayConnectionType.EXPRESS_ROUTE); innerModel().withPeer(new SubResource().withId(circuitId)); return this; } @Override public VirtualNetworkGatewayConnectionImpl withExpressRoute(ExpressRouteCircuit circuit) { return withExpressRoute(circuit.id()); } @Override public VirtualNetworkGatewayConnectionImpl withLocalNetworkGateway(LocalNetworkGateway localNetworkGateway) { innerModel().withLocalNetworkGateway2(localNetworkGateway.innerModel()); return this; } @Override public VirtualNetworkGatewayConnectionImpl withSecondVirtualNetworkGateway( VirtualNetworkGateway virtualNetworkGateway2) { innerModel().withVirtualNetworkGateway2(virtualNetworkGateway2.innerModel()); return this; } @Override 
public VirtualNetworkGatewayConnectionImpl withSharedKey(String sharedKey) { if (isInCreateMode()) { innerModel().withSharedKey(sharedKey); } else { updateSharedKey = sharedKey; } return this; } @Override public VirtualNetworkGatewayConnectionImpl withBgp() { innerModel().withEnableBgp(true); return this; } @Override public VirtualNetworkGatewayConnectionImpl withoutBgp() { innerModel().withEnableBgp(false); return this; } @Override public VirtualNetworkGatewayConnectionImpl withAuthorization(String authorizationKey) { innerModel().withAuthorizationKey(authorizationKey); return this; } @Override protected Mono<VirtualNetworkGatewayConnectionInner> getInnerAsync() { return myManager .serviceClient() .getVirtualNetworkGatewayConnections() .getByResourceGroupAsync(resourceGroupName(), name()); } @Override private void beforeCreating() { innerModel().withVirtualNetworkGateway1(parent.innerModel()); } @Override public VirtualNetworkGatewayConnectionImpl updateTags() { return this; } @Override public VirtualNetworkGatewayConnection applyTags() { return applyTagsAsync().block(); } @Override public Mono<VirtualNetworkGatewayConnection> applyTagsAsync() { return this .manager() .serviceClient() .getVirtualNetworkGatewayConnections() .updateTagsAsync(resourceGroupName(), name(), innerModel().tags()) .flatMap(inner -> refreshAsync()); } }
Hi @lzc-1997-abel, please create another PR that sets multiple scopes for Graph and Office, covering both the test and the sample (with README). ![image](https://user-images.githubusercontent.com/13167207/102970210-0f29a180-4532-11eb-9bab-c278ff40be37.png)
// NOTE(review): this blob appears mangled by extraction -- the scope URL string
// literals are truncated at "https:" (unterminated quotes), so the method cannot
// be safely reformatted or rewritten; preserved verbatim below. It appears to
// verify per-client scope separation across the office/azure/graph/arm
// authorized clients -- TODO confirm against the original file.
public void testRefreshTokenConverter() { try (AppRunner app = new AppRunner(DumbApp.class)) { SeleniumTestUtils.addProperty(app); app.property("azure.activedirectory.authorization.office.scopes", "https: app.property("azure.activedirectory.authorization.graph.scopes", "https: List<String> endPoints = new ArrayList<>(); endPoints.add("api/office"); endPoints.add("api/azure"); endPoints.add("api/graph"); endPoints.add("api/arm"); Map<String, String> result = SeleniumTestUtils.get(app, endPoints); Assert.assertFalse(result.get("api/office").contains("profile")); Assert.assertTrue(result.get("api/office").contains("https: Assert.assertTrue(result.get("api/azure").contains("profile")); Assert.assertTrue(result.get("api/azure").contains("https: Assert.assertTrue(result.get("api/graph").contains("profile")); Assert.assertTrue(result.get("api/graph").contains("https: Assert.assertNotEquals("error", result.get("api/arm")); } }
app.property("azure.activedirectory.authorization.graph.scopes", "https:
// NOTE(review): this blob appears mangled by extraction -- the scope URL string
// literals are truncated at "https:" (unterminated quotes), so the method cannot
// be safely reformatted or rewritten; preserved verbatim below. It appears to
// verify per-client scope separation across the office/azure/graph/arm
// authorized clients -- TODO confirm against the original file.
public void testRefreshTokenConverter() { try (AppRunner app = new AppRunner(DumbApp.class)) { SeleniumTestUtils.addProperty(app); app.property("azure.activedirectory.authorization.office.scopes", "https: app.property("azure.activedirectory.authorization.graph.scopes", "https: List<String> endPoints = new ArrayList<>(); endPoints.add("api/office"); endPoints.add("api/azure"); endPoints.add("api/graph"); endPoints.add("api/arm"); Map<String, String> result = SeleniumTestUtils.get(app, endPoints); Assert.assertFalse(result.get("api/office").contains("profile")); Assert.assertTrue(result.get("api/office").contains("https: Assert.assertTrue(result.get("api/azure").contains("profile")); Assert.assertTrue(result.get("api/azure").contains("https: Assert.assertTrue(result.get("api/graph").contains("profile")); Assert.assertTrue(result.get("api/graph").contains("https: Assert.assertNotEquals("error", result.get("api/arm")); } }
// NOTE(review): a bare '@Test' annotation sits directly on the nested DumbApp
// class below -- it looks like a leftover from a test method that was stripped
// out of this dump; confirm against the original file.
class RefreshTokenScopesIT {

    /**
     * Minimal Spring Boot app exposing one endpoint per AAD authorized client
     * (office/azure/graph/arm); each endpoint returns the scopes carried by the
     * injected client's access token.
     */
    @Test
    @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true)
    @SpringBootApplication
    @RestController
    public static class DumbApp {

        // Returns the scopes of the "office" authorized client's access token.
        @GetMapping(value = "api/office")
        public Set<String> office(
            @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "azure" authorized client's access token.
        @GetMapping(value = "api/azure")
        public Set<String> azure(
            @RegisteredOAuth2AuthorizedClient("azure") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "graph" authorized client's access token.
        @GetMapping(value = "api/graph")
        public Set<String> graph(
            @RegisteredOAuth2AuthorizedClient("graph") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the literal "error" -- presumably reaching this handler at
        // all signals a test failure for the "arm" client; TODO confirm.
        @GetMapping(value = "api/arm")
        public String arm(
            @RegisteredOAuth2AuthorizedClient("arm") OAuth2AuthorizedClient authorizedClient) {
            return "error";
        }
    }
}
// NOTE(review): a bare '@Test' annotation sits directly on the nested DumbApp
// class below -- it looks like a leftover from a test method that was stripped
// out of this dump; confirm against the original file.
class RefreshTokenScopesIT {

    /**
     * Minimal Spring Boot app exposing one endpoint per AAD authorized client
     * (office/azure/graph/arm); each endpoint returns the scopes carried by the
     * injected client's access token.
     */
    @Test
    @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true)
    @SpringBootApplication
    @RestController
    public static class DumbApp {

        // Returns the scopes of the "office" authorized client's access token.
        @GetMapping(value = "api/office")
        public Set<String> office(
            @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "azure" authorized client's access token.
        @GetMapping(value = "api/azure")
        public Set<String> azure(
            @RegisteredOAuth2AuthorizedClient("azure") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "graph" authorized client's access token.
        @GetMapping(value = "api/graph")
        public Set<String> graph(
            @RegisteredOAuth2AuthorizedClient("graph") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the literal "error" -- presumably reaching this handler at
        // all signals a test failure for the "arm" client; TODO confirm.
        @GetMapping(value = "api/arm")
        public String arm(
            @RegisteredOAuth2AuthorizedClient("arm") OAuth2AuthorizedClient authorizedClient) {
            return "error";
        }
    }
}
Why should `office` and `graph` contain the `profile` scope? I remember `openid` and `profile` are only needed for the on-demand client.
// NOTE(review): this blob appears mangled by extraction -- the scope URL string
// literals are truncated at "https:" (unterminated quotes), so the method cannot
// be safely reformatted or rewritten; preserved verbatim below. It appears to
// verify per-client scope separation across the office/azure/graph/arm
// authorized clients -- TODO confirm against the original file.
public void testRefreshTokenConverter() { try (AppRunner app = new AppRunner(DumbApp.class)) { SeleniumTestUtils.addProperty(app); app.property("azure.activedirectory.authorization.office.scopes", "https: app.property("azure.activedirectory.authorization.graph.scopes", "https: List<String> endPoints = new ArrayList<>(); endPoints.add("api/office"); endPoints.add("api/azure"); endPoints.add("api/graph"); endPoints.add("api/arm"); Map<String, String> result = SeleniumTestUtils.get(app, endPoints); Assert.assertFalse(result.get("api/office").contains("profile")); Assert.assertTrue(result.get("api/office").contains("https: Assert.assertTrue(result.get("api/azure").contains("profile")); Assert.assertTrue(result.get("api/azure").contains("https: Assert.assertTrue(result.get("api/graph").contains("profile")); Assert.assertTrue(result.get("api/graph").contains("https: Assert.assertNotEquals("error", result.get("api/arm")); } }
Assert.assertFalse(result.get("api/office").contains("profile"));
// NOTE(review): this blob appears mangled by extraction -- the scope URL string
// literals are truncated at "https:" (unterminated quotes), so the method cannot
// be safely reformatted or rewritten; preserved verbatim below. It appears to
// verify per-client scope separation across the office/azure/graph/arm
// authorized clients -- TODO confirm against the original file.
public void testRefreshTokenConverter() { try (AppRunner app = new AppRunner(DumbApp.class)) { SeleniumTestUtils.addProperty(app); app.property("azure.activedirectory.authorization.office.scopes", "https: app.property("azure.activedirectory.authorization.graph.scopes", "https: List<String> endPoints = new ArrayList<>(); endPoints.add("api/office"); endPoints.add("api/azure"); endPoints.add("api/graph"); endPoints.add("api/arm"); Map<String, String> result = SeleniumTestUtils.get(app, endPoints); Assert.assertFalse(result.get("api/office").contains("profile")); Assert.assertTrue(result.get("api/office").contains("https: Assert.assertTrue(result.get("api/azure").contains("profile")); Assert.assertTrue(result.get("api/azure").contains("https: Assert.assertTrue(result.get("api/graph").contains("profile")); Assert.assertTrue(result.get("api/graph").contains("https: Assert.assertNotEquals("error", result.get("api/arm")); } }
// NOTE(review): a bare '@Test' annotation sits directly on the nested DumbApp
// class below -- it looks like a leftover from a test method that was stripped
// out of this dump; confirm against the original file.
class RefreshTokenScopesIT {

    /**
     * Minimal Spring Boot app exposing one endpoint per AAD authorized client
     * (office/azure/graph/arm); each endpoint returns the scopes carried by the
     * injected client's access token.
     */
    @Test
    @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true)
    @SpringBootApplication
    @RestController
    public static class DumbApp {

        // Returns the scopes of the "office" authorized client's access token.
        @GetMapping(value = "api/office")
        public Set<String> office(
            @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "azure" authorized client's access token.
        @GetMapping(value = "api/azure")
        public Set<String> azure(
            @RegisteredOAuth2AuthorizedClient("azure") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "graph" authorized client's access token.
        @GetMapping(value = "api/graph")
        public Set<String> graph(
            @RegisteredOAuth2AuthorizedClient("graph") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the literal "error" -- presumably reaching this handler at
        // all signals a test failure for the "arm" client; TODO confirm.
        @GetMapping(value = "api/arm")
        public String arm(
            @RegisteredOAuth2AuthorizedClient("arm") OAuth2AuthorizedClient authorizedClient) {
            return "error";
        }
    }
}
// NOTE(review): a bare '@Test' annotation sits directly on the nested DumbApp
// class below -- it looks like a leftover from a test method that was stripped
// out of this dump; confirm against the original file.
class RefreshTokenScopesIT {

    /**
     * Minimal Spring Boot app exposing one endpoint per AAD authorized client
     * (office/azure/graph/arm); each endpoint returns the scopes carried by the
     * injected client's access token.
     */
    @Test
    @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true)
    @SpringBootApplication
    @RestController
    public static class DumbApp {

        // Returns the scopes of the "office" authorized client's access token.
        @GetMapping(value = "api/office")
        public Set<String> office(
            @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "azure" authorized client's access token.
        @GetMapping(value = "api/azure")
        public Set<String> azure(
            @RegisteredOAuth2AuthorizedClient("azure") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the scopes of the "graph" authorized client's access token.
        @GetMapping(value = "api/graph")
        public Set<String> graph(
            @RegisteredOAuth2AuthorizedClient("graph") OAuth2AuthorizedClient authorizedClient) {
            return Optional.of(authorizedClient)
                .map(OAuth2AuthorizedClient::getAccessToken)
                .map(OAuth2AccessToken::getScopes)
                .orElse(null);
        }

        // Returns the literal "error" -- presumably reaching this handler at
        // all signals a test failure for the "arm" client; TODO confirm.
        @GetMapping(value = "api/arm")
        public String arm(
            @RegisteredOAuth2AuthorizedClient("arm") OAuth2AuthorizedClient authorizedClient) {
            return "error";
        }
    }
}
Add a comment like this: ``` // TODO remove deprecated method after we do not need to support spring-boot-2.2.x ```
/**
 * Builds the OIDC logout success handler, applying the configured
 * post-logout redirect URI when one is present.
 */
protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
    OidcClientInitiatedLogoutSuccessHandler handler =
        new OidcClientInitiatedLogoutSuccessHandler(this.repo);
    String postLogoutUri = this.properties.getPostLogoutRedirectUri();
    // Only override the framework default when a URI is actually configured.
    if (StringUtils.hasText(postLogoutUri)) {
        handler.setPostLogoutRedirectUri(URI.create(postLogoutUri));
    }
    return handler;
}
/**
 * Builds the OIDC logout success handler, applying the configured
 * post-logout redirect URI when one is present.
 */
protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
    OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
        new OidcClientInitiatedLogoutSuccessHandler(this.repo);
    String uri = this.properties.getPostLogoutRedirectUri();
    // Only override the framework default when a URI is actually configured.
    if (StringUtils.hasText(uri)) {
        oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
    }
    return oidcLogoutSuccessHandler;
}
/**
 * Web security configuration wiring AAD OAuth2 login: custom authorization
 * request resolver, token response client with conditional-access error
 * handling, OIDC user service, failure handler, and the OIDC logout handler
 * (oidcLogoutSuccessHandler() is defined outside this section).
 */
class AzureOAuth2Configuration extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        http.authorizeRequests()
            .anyRequest().authenticated()
            .and()
            .oauth2Login()
            .authorizationEndpoint()
            .authorizationRequestResolver(requestResolver())
            .and()
            .tokenEndpoint()
            .accessTokenResponseClient(accessTokenResponseClient())
            .and()
            .userInfoEndpoint()
            .oidcUserService(oidcUserService)
            .and()
            .failureHandler(failureHandler())
            .and()
            .logout()
            .logoutSuccessHandler(oidcLogoutSuccessHandler())
            .and();
    }

    // Token response client whose RestTemplate maps AAD conditional-access
    // error responses and uses an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        RestTemplate restTemplate = new RestTemplate(Arrays.asList(
            new FormHttpMessageConverter(), new OAuth2AccessTokenResponseHttpMessageConverter()));
        restTemplate.setErrorHandler(new AADConditionalAccessResponseErrorHandler());
        result.setRestOperations(restTemplate);
        result.setRequestEntityConverter(new AuthzCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AzureOAuth2AuthorizationRequestResolver(this.repo);
    }

    // Failure handler for AAD-specific authentication errors.
    protected AuthenticationFailureHandler failureHandler() {
        return new AzureOAuthenticationFailureHandler();
    }
}
/**
 * Web security configuration wiring AAD OAuth2 login: custom authorization
 * request resolver, token response client with conditional-access error
 * handling, OIDC user service, failure handler, and the OIDC logout handler
 * (oidcLogoutSuccessHandler() is defined outside this section).
 */
class AzureOAuth2Configuration extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    @Override
    protected void configure(HttpSecurity http) throws Exception {
        http.authorizeRequests()
            .anyRequest().authenticated()
            .and()
            .oauth2Login()
            .authorizationEndpoint()
            .authorizationRequestResolver(requestResolver())
            .and()
            .tokenEndpoint()
            .accessTokenResponseClient(accessTokenResponseClient())
            .and()
            .userInfoEndpoint()
            .oidcUserService(oidcUserService)
            .and()
            .failureHandler(failureHandler())
            .and()
            .logout()
            .logoutSuccessHandler(oidcLogoutSuccessHandler())
            .and();
    }

    // Token response client whose RestTemplate maps AAD conditional-access
    // error responses and uses an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        RestTemplate restTemplate = new RestTemplate(Arrays.asList(
            new FormHttpMessageConverter(), new OAuth2AccessTokenResponseHttpMessageConverter()));
        restTemplate.setErrorHandler(new AADConditionalAccessResponseErrorHandler());
        result.setRestOperations(restTemplate);
        result.setRequestEntityConverter(new AuthzCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AzureOAuth2AuthorizationRequestResolver(this.repo);
    }

    // Failure handler for AAD-specific authentication errors.
    protected AuthenticationFailureHandler failureHandler() {
        return new AzureOAuthenticationFailureHandler();
    }
}
Is there any specific reason for choosing `WebAsyncManagerIntegrationFilter`?
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges.
// NOTE(review): the filter is anchored before WebAsyncManagerIntegrationFilter
// (the first filter in the chain); anchoring before ExceptionTranslationFilter
// -- which is always registered and handles authorization failures -- may be a
// more robust choice. Confirm before changing.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
}
.addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges. The filter is
// anchored just before ExceptionTranslationFilter, which is always registered
// in the Spring Security filter chain.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * the AAD authorization request resolver, and the conditional-access filter.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }

    // Factory for the conditional-access filter; overridable by subclasses.
    protected AADHandleConditionalAccessFilter handleConditionalAccessFilter() {
        return new AADHandleConditionalAccessFilter();
    }
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * and the AAD authorization request resolver.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }
}
When we need to add a filter, Spring Security requires us to specify the [registration order](https://github.com/spring-projects/spring-security/blob/master/config/src/main/java/org/springframework/security/config/annotation/web/builders/FilterComparator.java#L54). I didn't find that it needs to be added after any specific filter (it works after all filters), so I added our custom filter first (`WebAsyncManagerIntegrationFilter` is the first filter in the filter chain).
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges.
// NOTE(review): the filter is anchored before WebAsyncManagerIntegrationFilter
// (the first filter in the chain); anchoring before ExceptionTranslationFilter
// -- which is always registered and handles authorization failures -- may be a
// more robust choice. Confirm before changing.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
}
.addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges. The filter is
// anchored just before ExceptionTranslationFilter, which is always registered
// in the Spring Security filter chain.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * the AAD authorization request resolver, and the conditional-access filter.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }

    // Factory for the conditional-access filter; overridable by subclasses.
    protected AADHandleConditionalAccessFilter handleConditionalAccessFilter() {
        return new AADHandleConditionalAccessFilter();
    }
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * and the AAD authorization request resolver.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }
}
If there is no `WebAsyncManagerIntegrationFilter` in the context, will it throw an exception? Can we make sure that `WebAsyncManagerIntegrationFilter` always exists?
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges.
// NOTE(review): the filter is anchored before WebAsyncManagerIntegrationFilter
// (the first filter in the chain); anchoring before ExceptionTranslationFilter
// -- which is always registered and handles authorization failures -- may be a
// more robust choice. Confirm before changing.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
}
.addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges. The filter is
// anchored just before ExceptionTranslationFilter, which is always registered
// in the Spring Security filter chain.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * the AAD authorization request resolver, and the conditional-access filter.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }

    // Factory for the conditional-access filter; overridable by subclasses.
    protected AADHandleConditionalAccessFilter handleConditionalAccessFilter() {
        return new AADHandleConditionalAccessFilter();
    }
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * and the AAD authorization request resolver.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }
}
Please add an integration test for conditional access. It's OK to do it after this PR is merged.
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges.
// NOTE(review): the filter is anchored before WebAsyncManagerIntegrationFilter
// (the first filter in the chain); anchoring before ExceptionTranslationFilter
// -- which is always registered and handles authorization failures -- may be a
// more robust choice. Confirm before changing.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
}
.addFilterBefore(handleConditionalAccessFilter(), WebAsyncManagerIntegrationFilter.class);
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges. The filter is
// anchored just before ExceptionTranslationFilter, which is always registered
// in the Spring Security filter chain.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * the AAD authorization request resolver, and the conditional-access filter.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }

    // Factory for the conditional-access filter; overridable by subclasses.
    protected AADHandleConditionalAccessFilter handleConditionalAccessFilter() {
        return new AADHandleConditionalAccessFilter();
    }
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * and the AAD authorization request resolver.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }
}
By doing this, do you mean `ExceptionTranslationFilter` will always exist in the context?
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges. The filter is
// anchored just before ExceptionTranslationFilter, which is always registered
// in the Spring Security filter chain.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
}
.addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
// Wires AAD OAuth2 login: custom authorization request resolver, token
// response client, OIDC user service, and logout success handler, plus a
// filter that handles AAD conditional-access challenges. The filter is
// anchored just before ExceptionTranslationFilter, which is always registered
// in the Spring Security filter chain.
protected void configure(HttpSecurity http) throws Exception {
    http.oauth2Login()
        .authorizationEndpoint()
        .authorizationRequestResolver(requestResolver())
        .and()
        .tokenEndpoint()
        .accessTokenResponseClient(accessTokenResponseClient())
        .and()
        .userInfoEndpoint()
        .oidcUserService(oidcUserService)
        .and()
        .and()
        .logout()
        .logoutSuccessHandler(oidcLogoutSuccessHandler())
        .and()
        .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
}
/**
 * Base WebSecurityConfigurerAdapter for AAD web applications: provides the
 * OIDC logout success handler, the authorization-code token response client,
 * and the AAD authorization request resolver.
 */
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter {

    @Autowired
    private AADWebAppClientRegistrationRepository repo;

    @Autowired
    private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService;

    @Autowired
    protected AADAuthenticationProperties properties;

    // NOTE(review): the '@Override' below looks like a leftover from a
    // configure(HttpSecurity) method stripped out of this dump --
    // oidcLogoutSuccessHandler() does not override a supertype method;
    // confirm against the original file.
    @Override
    protected LogoutSuccessHandler oidcLogoutSuccessHandler() {
        OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler =
            new OidcClientInitiatedLogoutSuccessHandler(this.repo);
        String uri = this.properties.getPostLogoutRedirectUri();
        // Only set a post-logout redirect URI when one is configured.
        if (StringUtils.hasText(uri)) {
            oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri));
        }
        return oidcLogoutSuccessHandler;
    }

    // Token response client with an AAD-specific request entity converter.
    protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() {
        DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient();
        result.setRequestEntityConverter(
            new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient()));
        return result;
    }

    // Resolver that customizes authorization requests for AAD.
    protected OAuth2AuthorizationRequestResolver requestResolver() {
        return new AADOAuth2AuthorizationRequestResolver(this.repo);
    }
}
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter { @Autowired private AADWebAppClientRegistrationRepository repo; @Autowired private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService; @Autowired protected AADAuthenticationProperties properties; @Override protected LogoutSuccessHandler oidcLogoutSuccessHandler() { OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler = new OidcClientInitiatedLogoutSuccessHandler(this.repo); String uri = this.properties.getPostLogoutRedirectUri(); if (StringUtils.hasText(uri)) { oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri)); } return oidcLogoutSuccessHandler; } protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() { DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient(); result.setRequestEntityConverter( new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient())); return result; } protected OAuth2AuthorizationRequestResolver requestResolver() { return new AADOAuth2AuthorizationRequestResolver(this.repo); } }
Yes, this filter handles most authorization failures and always loaded into the filterchain.
protected void configure(HttpSecurity http) throws Exception { http.oauth2Login() .authorizationEndpoint() .authorizationRequestResolver(requestResolver()) .and() .tokenEndpoint() .accessTokenResponseClient(accessTokenResponseClient()) .and() .userInfoEndpoint() .oidcUserService(oidcUserService) .and() .and() .logout() .logoutSuccessHandler(oidcLogoutSuccessHandler()) .and() .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class); }
.addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class);
protected void configure(HttpSecurity http) throws Exception { http.oauth2Login() .authorizationEndpoint() .authorizationRequestResolver(requestResolver()) .and() .tokenEndpoint() .accessTokenResponseClient(accessTokenResponseClient()) .and() .userInfoEndpoint() .oidcUserService(oidcUserService) .and() .and() .logout() .logoutSuccessHandler(oidcLogoutSuccessHandler()) .and() .addFilterBefore(new AADHandleConditionalAccessFilter(), ExceptionTranslationFilter.class); }
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter { @Autowired private AADWebAppClientRegistrationRepository repo; @Autowired private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService; @Autowired protected AADAuthenticationProperties properties; @Override protected LogoutSuccessHandler oidcLogoutSuccessHandler() { OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler = new OidcClientInitiatedLogoutSuccessHandler(this.repo); String uri = this.properties.getPostLogoutRedirectUri(); if (StringUtils.hasText(uri)) { oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri)); } return oidcLogoutSuccessHandler; } protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() { DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient(); result.setRequestEntityConverter( new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient())); return result; } protected OAuth2AuthorizationRequestResolver requestResolver() { return new AADOAuth2AuthorizationRequestResolver(this.repo); } }
class AADWebSecurityConfigurerAdapter extends WebSecurityConfigurerAdapter { @Autowired private AADWebAppClientRegistrationRepository repo; @Autowired private OAuth2UserService<OidcUserRequest, OidcUser> oidcUserService; @Autowired protected AADAuthenticationProperties properties; @Override protected LogoutSuccessHandler oidcLogoutSuccessHandler() { OidcClientInitiatedLogoutSuccessHandler oidcLogoutSuccessHandler = new OidcClientInitiatedLogoutSuccessHandler(this.repo); String uri = this.properties.getPostLogoutRedirectUri(); if (StringUtils.hasText(uri)) { oidcLogoutSuccessHandler.setPostLogoutRedirectUri(URI.create(uri)); } return oidcLogoutSuccessHandler; } protected OAuth2AccessTokenResponseClient<OAuth2AuthorizationCodeGrantRequest> accessTokenResponseClient() { DefaultAuthorizationCodeTokenResponseClient result = new DefaultAuthorizationCodeTokenResponseClient(); result.setRequestEntityConverter( new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(repo.getAzureClient())); return result; } protected OAuth2AuthorizationRequestResolver requestResolver() { return new AADOAuth2AuthorizationRequestResolver(this.repo); } }
Can we add checks on if the authorization code flow is executed for "arm" client or the same checks as above?
public void testAccessTokenScopes() throws InterruptedException { Map<String, String> arguments = new HashMap<>(); arguments.put( "azure.activedirectory.authorization-clients.office.scopes", "https: + "https: arguments.put( "azure.activedirectory.authorization-clients.graph.scopes", "https: AADSeleniumITHelper aadSeleniumITHelper = new AADSeleniumITHelper(DumbApp.class, arguments); String httpResponse = aadSeleniumITHelper.httpGet("accessTokenScopes/azure"); Assert.assertTrue(httpResponse.contains("profile")); Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: httpResponse = aadSeleniumITHelper.httpGet("accessTokenScopes/graph"); Assert.assertTrue(httpResponse.contains("profile")); Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: httpResponse = aadSeleniumITHelper.httpGet("accessTokenScopes/office"); Assert.assertFalse(httpResponse.contains("profile")); Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: httpResponse = aadSeleniumITHelper.httpGet("arm"); Assert.assertNotEquals(httpResponse, "arm"); }
}
public void testAccessTokenScopes() throws InterruptedException { Map<String, String> arguments = new HashMap<>(); arguments.put( "azure.activedirectory.authorization-clients.office.scopes", "https: + "https: arguments.put( "azure.activedirectory.authorization-clients.graph.scopes", "https: AADSeleniumITHelper aadSeleniumITHelper = new AADSeleniumITHelper(DumbApp.class, arguments); String httpResponse = aadSeleniumITHelper.httpGet("accessTokenScopes/azure"); Assert.assertTrue(httpResponse.contains("profile")); Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: httpResponse = aadSeleniumITHelper.httpGet("accessTokenScopes/graph"); Assert.assertTrue(httpResponse.contains("profile")); Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: httpResponse = aadSeleniumITHelper.httpGet("accessTokenScopes/office"); Assert.assertFalse(httpResponse.contains("profile")); Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: Assert.assertTrue(httpResponse.contains("https: httpResponse = aadSeleniumITHelper.httpGet("arm"); Assert.assertNotEquals(httpResponse, "arm"); }
class AADAccessTokenScopesIT { @Test @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @RestController public static class DumbApp { @GetMapping(value = "accessTokenScopes/azure") public Set<String> azure( @RegisteredOAuth2AuthorizedClient("azure") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } @GetMapping(value = "accessTokenScopes/graph") public Set<String> graph( @RegisteredOAuth2AuthorizedClient("graph") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } @GetMapping(value = "accessTokenScopes/office") public Set<String> office( @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } @GetMapping(value = "arm") public String arm( @RegisteredOAuth2AuthorizedClient("arm") OAuth2AuthorizedClient authorizedClient) { return "arm"; } } }
class AADAccessTokenScopesIT { @Test @EnableGlobalMethodSecurity(securedEnabled = true, prePostEnabled = true) @SpringBootApplication @RestController public static class DumbApp { @GetMapping(value = "accessTokenScopes/azure") public Set<String> azure( @RegisteredOAuth2AuthorizedClient("azure") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } @GetMapping(value = "accessTokenScopes/graph") public Set<String> graph( @RegisteredOAuth2AuthorizedClient("graph") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } @GetMapping(value = "accessTokenScopes/office") public Set<String> office( @RegisteredOAuth2AuthorizedClient("office") OAuth2AuthorizedClient authorizedClient) { return Optional.of(authorizedClient) .map(OAuth2AuthorizedClient::getAccessToken) .map(OAuth2AccessToken::getScopes) .orElse(null); } @GetMapping(value = "arm") public String arm( @RegisteredOAuth2AuthorizedClient("arm") OAuth2AuthorizedClient authorizedClient) { return "arm"; } } }
This check is to prevent multiple appends if request is retried. I.e. Storage scopes auth policy per each retry.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
} else if (!query.contains(signature)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
Based on this should this policy implement https://github.com/Azure/azure-sdk-for-java/blob/master/sdk/core/azure-core/src/main/java/com/azure/core/http/policy/HttpPipelinePolicy.java#L36 with `PER_CALL`? Having this logic and the positional change are another route as well, to explicitly prevent duplicate SAS parameters.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
} else if (!query.contains(signature)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
Do we want to clean the signature on construction of the credential to reduce the number of times we need to substring?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
if (signature.startsWith("?")) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
Is there a possible case where we have a URL with an empty query, such as `https:://example.com?`, where this condition passes and we end up with double `?`s
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
if (query == null || query.isEmpty()) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
``` AzureSasCredential credential = AzureSasCredential("?foo"); System.out.println(credential.getSignature()); // this would print "foo" ``` I was thinking about it but I thought it would be good to make sure that what we feed into constructor is the same what getSignature() returns.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
if (signature.startsWith("?")) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
I took a look how `getPipelinePosition` is used. It seems to be suited more towards users providing custom policies through client builders. I.e. I don't see any sdk checking this property while adding auth policy to the pipeline. So this wouldn't serve as strong enforcement but rather suggestion. Therefore I'd rather keep this check to prevent surprise should some sdk wrongly scope this. I can override the getPipelinePosition to indicate where it should be placed.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
} else if (!query.contains(signature)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
good catch.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
if (query == null || query.isEmpty()) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
For this policy I would override `getPipelinePosition` as we expect to add the SAS token only once for the initial request and any number of retries (unlike shared key where the Date header is taken into account so it needs recalculation per retry).
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
} else if (!query.contains(signature)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
Makes sense to me, the sub-stringing is only a minor cost anyhow.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
if (signature.startsWith("?")) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
Sounds good. I removed this check and getPipelinePosition is now overriden in this class.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
} else if (!query.contains(signature)) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
Stripping out the leading `?` may work for storage but will there be other services where the leading `?` would still be required?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { url = url + "?" + signature; } else if (!query.contains(signature)) { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
if (signature.startsWith("?")) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpRequest httpRequest = context.getHttpRequest(); if ("http".equals(httpRequest.getUrl().getProtocol())) { return Mono.error(new IllegalStateException( "Shared access signature credentials require HTTPS to prevent leaking the shared access signature.")); } String signature = credential.getSignature(); if (signature.startsWith("?")) { signature = signature.substring(1); } String query = httpRequest.getUrl().getQuery(); String url = httpRequest.getUrl().toString(); if (query == null || query.isEmpty()) { if (url.endsWith("?")) { url = url + signature; } else { url = url + "?" + signature; } } else { url = url + "&" + signature; } httpRequest.setUrl(url); return next.process(); }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
class AzureSasCredentialPolicy implements HttpPipelinePolicy { private final AzureSasCredential credential; /** * Creates a policy that uses the passed {@link AzureSasCredential} to append sas to query string. * * @param credential The {@link AzureSasCredential} containing the shared access signature to use. * @throws NullPointerException If {@code credential} is {@code null}. */ public AzureSasCredentialPolicy(AzureSasCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); this.credential = credential; } @Override }
nit: this line can be deleted
public void sasCredentialsTest(String signature, String url, String expectedUrl) throws Exception { AzureSasCredential credential = new AzureSasCredential(signature); HttpPipelinePolicy auditorPolicy = (context, next) -> { String actualUrl = context.getHttpRequest().getUrl().toString(); Assertions.assertEquals(expectedUrl, actualUrl); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new AzureSasCredentialPolicy(credential), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL(url)); pipeline.send(request).block(); }
public void sasCredentialsTest(String signature, String url, String expectedUrl) throws Exception { AzureSasCredential credential = new AzureSasCredential(signature); HttpPipelinePolicy auditorPolicy = (context, next) -> { String actualUrl = context.getHttpRequest().getUrl().toString(); Assertions.assertEquals(expectedUrl, actualUrl); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new AzureSasCredentialPolicy(credential), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL(url)); pipeline.send(request).block(); }
class CredentialsTests { @Test public void basicCredentialsTest() throws Exception { BasicAuthenticationCredential credentials = new BasicAuthenticationCredential("user", "pass"); HttpPipelinePolicy auditorPolicy = (context, next) -> { String headerValue = context.getHttpRequest().getHeaders().getValue("Authorization"); Assertions.assertEquals("Basic dXNlcjpwYXNz", headerValue); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies((context, next) -> credentials.getToken(new TokenRequestContext().addScopes("scope./default")) .flatMap(token -> { context.getHttpRequest().getHeaders().put("Authorization", "Basic " + token.getToken()); return next.process(); }), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("http: pipeline.send(request).block(); } @Test public void tokenCredentialTest() throws Exception { TokenCredential credentials = request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)); HttpPipelinePolicy auditorPolicy = (context, next) -> { String headerValue = context.getHttpRequest().getHeaders().getValue("Authorization"); Assertions.assertEquals("Bearer this_is_a_token", headerValue); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new BearerTokenAuthenticationPolicy(credentials, "scope./default"), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("https: pipeline.send(request).block(); } @Test public void tokenCredentialHttpSchemeTest() throws Exception { TokenCredential credentials = request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)); HttpPipelinePolicy auditorPolicy = (context, next) -> { String headerValue = context.getHttpRequest().getHeaders().getValue("Authorization"); Assertions.assertEquals("Bearer this_is_a_token", headerValue); return next.process(); }; final HttpPipeline pipeline 
= new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new BearerTokenAuthenticationPolicy(credentials, "scope./default"), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(pipeline.send(request)) .expectErrorMessage("token credentials require a URL using the HTTPS protocol scheme") .verify(); } @ParameterizedTest @CsvSource( { "test_signature,https: "?test_signature,https: "test_signature,https: "?test_signature,https: "test_signature,https: "?test_signature,https: @Test public void sasCredentialsHTTPSchemeTest() throws Exception { AzureSasCredential credential = new AzureSasCredential("foo"); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new AzureSasCredentialPolicy(credential)) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(pipeline.send(request)) .expectErrorMessage("Shared access signature credentials require HTTPS to prevent leaking the shared access signature.") .verify(); } }
class CredentialsTests { @Test public void basicCredentialsTest() throws Exception { BasicAuthenticationCredential credentials = new BasicAuthenticationCredential("user", "pass"); HttpPipelinePolicy auditorPolicy = (context, next) -> { String headerValue = context.getHttpRequest().getHeaders().getValue("Authorization"); Assertions.assertEquals("Basic dXNlcjpwYXNz", headerValue); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies((context, next) -> credentials.getToken(new TokenRequestContext().addScopes("scope./default")) .flatMap(token -> { context.getHttpRequest().getHeaders().put("Authorization", "Basic " + token.getToken()); return next.process(); }), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("http: pipeline.send(request).block(); } @Test public void tokenCredentialTest() throws Exception { TokenCredential credentials = request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)); HttpPipelinePolicy auditorPolicy = (context, next) -> { String headerValue = context.getHttpRequest().getHeaders().getValue("Authorization"); Assertions.assertEquals("Bearer this_is_a_token", headerValue); return next.process(); }; final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new BearerTokenAuthenticationPolicy(credentials, "scope./default"), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("https: pipeline.send(request).block(); } @Test public void tokenCredentialHttpSchemeTest() throws Exception { TokenCredential credentials = request -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)); HttpPipelinePolicy auditorPolicy = (context, next) -> { String headerValue = context.getHttpRequest().getHeaders().getValue("Authorization"); Assertions.assertEquals("Bearer this_is_a_token", headerValue); return next.process(); }; final HttpPipeline pipeline 
= new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new BearerTokenAuthenticationPolicy(credentials, "scope./default"), auditorPolicy) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(pipeline.send(request)) .expectErrorMessage("token credentials require a URL using the HTTPS protocol scheme") .verify(); } @ParameterizedTest @CsvSource( { "test_signature,https: "?test_signature,https: "test_signature,https: "?test_signature,https: "test_signature,https: "?test_signature,https: @Test public void sasCredentialsHTTPSchemeTest() throws Exception { AzureSasCredential credential = new AzureSasCredential("foo"); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient()) .policies(new AzureSasCredentialPolicy(credential)) .build(); HttpRequest request = new HttpRequest(HttpMethod.GET, new URL("http: StepVerifier.create(pipeline.send(request)) .expectErrorMessage("Shared access signature credentials require HTTPS to prevent leaking the shared access signature.") .verify(); } }
We can use `assertThat(context).hasSingleBean(AADAuthenticationProperties.class);` instead.
public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRegistrationRepository = context.getBean(AADWebAppClientRegistrationRepository.class); OAuth2AuthorizedClientRepository authorizedClientRepository = context.getBean(OAuth2AuthorizedClientRepository.class); OAuth2UserService userService = context.getBean(OAuth2UserService.class); assertNotNull(clientRegistrationRepository); assertNotNull(authorizedClientRepository); assertNotNull(userService); }); }
assertNotNull(clientRegistrationRepository);
public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithoutRequiredProperties() .run(context -> { assertThat(context).doesNotHaveBean("AADWebAppClientRegistrationRepository"); assertThat(context).doesNotHaveBean("OAuth2AuthorizedClientRepository"); assertThat(context).doesNotHaveBean("OAuth2UserService"); }); } @Test @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile", "https: }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = 
clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: "https: "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "https: }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); 
ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "https: ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "https: ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void 
clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "https: "https: "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, 
"Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); 
assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
Why delete `allowed-groups`?
public static WebApplicationContextRunner getContextRunnerWithRequiredProperties() { return new WebApplicationContextRunner() .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .withUserConfiguration(AADWebAppConfiguration.class) .withPropertyValues( "azure.activedirectory.client-id = fake-client-id", "azure.activedirectory.client-secret = fake-client-secret", "azure.activedirectory.tenant-id = fake-tenant-id"); }
"azure.activedirectory.tenant-id = fake-tenant-id");
public static WebApplicationContextRunner getContextRunnerWithRequiredProperties() { return getContextRunner().withPropertyValues( "azure.activedirectory.client-id = fake-client-id", "azure.activedirectory.client-secret = fake-client-secret", "azure.activedirectory.tenant-id = fake-tenant-id"); }
class WebApplicationContextRunnerUtils { public static WebApplicationContextRunner getContextRunnerWithoutRequiredProperties() { return new WebApplicationContextRunner() .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .withUserConfiguration(AADWebAppConfiguration.class); } @SuppressWarnings("unchecked") public static MultiValueMap<String, String> toMultiValueMap(RequestEntity<?> entity) { return (MultiValueMap<String, String>) Optional.ofNullable(entity) .map(HttpEntity::getBody) .orElse(null); } }
class WebApplicationContextRunnerUtils { public static WebApplicationContextRunner getContextRunner() { return new WebApplicationContextRunner() .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .withUserConfiguration(AADWebAppConfiguration.class); } @SuppressWarnings("unchecked") public static MultiValueMap<String, String> toMultiValueMap(RequestEntity<?> entity) { return (MultiValueMap<String, String>) Optional.ofNullable(entity) .map(HttpEntity::getBody) .orElse(null); } }
Because allowed-group is not necessary.
public static WebApplicationContextRunner getContextRunnerWithRequiredProperties() { return new WebApplicationContextRunner() .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .withUserConfiguration(AADWebAppConfiguration.class) .withPropertyValues( "azure.activedirectory.client-id = fake-client-id", "azure.activedirectory.client-secret = fake-client-secret", "azure.activedirectory.tenant-id = fake-tenant-id"); }
"azure.activedirectory.tenant-id = fake-tenant-id");
public static WebApplicationContextRunner getContextRunnerWithRequiredProperties() { return getContextRunner().withPropertyValues( "azure.activedirectory.client-id = fake-client-id", "azure.activedirectory.client-secret = fake-client-secret", "azure.activedirectory.tenant-id = fake-tenant-id"); }
class WebApplicationContextRunnerUtils { public static WebApplicationContextRunner getContextRunnerWithoutRequiredProperties() { return new WebApplicationContextRunner() .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .withUserConfiguration(AADWebAppConfiguration.class); } @SuppressWarnings("unchecked") public static MultiValueMap<String, String> toMultiValueMap(RequestEntity<?> entity) { return (MultiValueMap<String, String>) Optional.ofNullable(entity) .map(HttpEntity::getBody) .orElse(null); } }
class WebApplicationContextRunnerUtils { public static WebApplicationContextRunner getContextRunner() { return new WebApplicationContextRunner() .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .withUserConfiguration(AADWebAppConfiguration.class); } @SuppressWarnings("unchecked") public static MultiValueMap<String, String> toMultiValueMap(RequestEntity<?> entity) { return (MultiValueMap<String, String>) Optional.ofNullable(entity) .map(HttpEntity::getBody) .orElse(null); } }
`allowed-groups` is not necessary here?
public void addScopeForDefaultClient() { contextRunner.withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { getBeans(context); MultiValueMap<String, String> body = convertedBodyOf(createCodeGrantRequest(azure)); assertEquals( "openid profile offline_access https: + "https: body.getFirst("scope") ); }); }
contextRunner.withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2")
public void addScopeForDefaultClient() { contextRunner.run(context -> { getBeans(context); MultiValueMap<String, String> body = convertedBodyOf(createCodeGrantRequest(azure)); assertEquals( "openid profile offline_access", body.getFirst("scope") ); }); }
class AADOAuth2AuthorizationCodeGrantRequestEntityConverterTest { private AADWebAppClientRegistrationRepository clientRepo; private ClientRegistration azure; private ClientRegistration arm; private final WebApplicationContextRunner contextRunner = WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.base-uri = fake-uri", "azure.activedirectory.authorization-clients.arm.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.on-demand=true"); private void getBeans(AssertableWebApplicationContext context) { clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); azure = clientRepo.findByRegistrationId("azure"); arm = clientRepo.findByRegistrationId("arm"); } @Test @Test public void addScopeForOnDemandClient() { contextRunner.run(context -> { getBeans(context); MultiValueMap<String, String> body = convertedBodyOf(createCodeGrantRequest(arm)); assertEquals("Calendars.Read openid profile", body.getFirst("scope")); }); } @Test @SuppressWarnings("unchecked") public void addHeadersForDefaultClient() { contextRunner.run(context -> { getBeans(context); HttpHeaders httpHeaders = convertedHeaderOf(createCodeGrantRequest(azure)); assertThat(httpHeaders.entrySet(), (Matcher) hasItems(expectedHeaders())); }); } @Test @SuppressWarnings("unchecked") public void addHeadersForOnDemandClient() { contextRunner.run(context -> { getBeans(context); HttpHeaders httpHeaders = convertedHeaderOf(createCodeGrantRequest(arm)); assertThat(httpHeaders.entrySet(), (Matcher) hasItems(expectedHeaders())); }); } private HttpHeaders convertedHeaderOf(OAuth2AuthorizationCodeGrantRequest request) { AADOAuth2AuthorizationCodeGrantRequestEntityConverter converter = new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(clientRepo.getAzureClient()); RequestEntity<?> entity = converter.convert(request); return Optional.ofNullable(entity) .map(HttpEntity::getHeaders) .orElse(null); } private Object[] 
expectedHeaders() { return AADOAuth2AuthorizationCodeGrantRequestEntityConverter .getHttpHeaders() .entrySet() .stream() .filter(entry -> !entry.getKey().equals("client-request-id")) .toArray(); } private MultiValueMap<String, String> convertedBodyOf(OAuth2AuthorizationCodeGrantRequest request) { AADOAuth2AuthorizationCodeGrantRequestEntityConverter converter = new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(clientRepo.getAzureClient()); RequestEntity<?> entity = converter.convert(request); return WebApplicationContextRunnerUtils.toMultiValueMap(entity); } private OAuth2AuthorizationCodeGrantRequest createCodeGrantRequest(ClientRegistration client) { return new OAuth2AuthorizationCodeGrantRequest(client, createExchange(client)); } private OAuth2AuthorizationExchange createExchange(ClientRegistration client) { return new OAuth2AuthorizationExchange( createAuthorizationRequest(client), createAuthorizationResponse()); } private OAuth2AuthorizationRequest createAuthorizationRequest(ClientRegistration client) { OAuth2AuthorizationRequest.Builder builder = OAuth2AuthorizationRequest.authorizationCode(); builder.authorizationUri(client.getProviderDetails().getAuthorizationUri()); builder.clientId(client.getClientId()); builder.scopes(client.getScopes()); builder.state("fake-state"); builder.redirectUri("http: return builder.build(); } private OAuth2AuthorizationResponse createAuthorizationResponse() { OAuth2AuthorizationResponse.Builder builder = OAuth2AuthorizationResponse.success("fake-code"); builder.redirectUri("http: builder.state("fake-state"); return builder.build(); } }
class AADOAuth2AuthorizationCodeGrantRequestEntityConverterTest { private AADWebAppClientRegistrationRepository clientRepo; private ClientRegistration azure; private ClientRegistration arm; private final WebApplicationContextRunner contextRunner = WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.base-uri = fake-uri", "azure.activedirectory.authorization-clients.arm.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.on-demand=true"); private void getBeans(AssertableWebApplicationContext context) { clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); azure = clientRepo.findByRegistrationId("azure"); arm = clientRepo.findByRegistrationId("arm"); } @Test @Test public void addScopeForOnDemandClient() { contextRunner.run(context -> { getBeans(context); MultiValueMap<String, String> body = convertedBodyOf(createCodeGrantRequest(arm)); assertEquals("Calendars.Read openid profile", body.getFirst("scope")); }); } @Test @SuppressWarnings("unchecked") public void addHeadersForDefaultClient() { contextRunner.run(context -> { getBeans(context); HttpHeaders httpHeaders = convertedHeaderOf(createCodeGrantRequest(azure)); assertThat(httpHeaders.entrySet(), (Matcher) hasItems(expectedHeaders())); }); } @Test @SuppressWarnings("unchecked") public void addHeadersForOnDemandClient() { contextRunner.run(context -> { getBeans(context); HttpHeaders httpHeaders = convertedHeaderOf(createCodeGrantRequest(arm)); assertThat(httpHeaders.entrySet(), (Matcher) hasItems(expectedHeaders())); }); } private HttpHeaders convertedHeaderOf(OAuth2AuthorizationCodeGrantRequest request) { AADOAuth2AuthorizationCodeGrantRequestEntityConverter converter = new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(clientRepo.getAzureClient()); RequestEntity<?> entity = converter.convert(request); return Optional.ofNullable(entity) .map(HttpEntity::getHeaders) .orElse(null); } private Object[] 
expectedHeaders() { return AADOAuth2AuthorizationCodeGrantRequestEntityConverter .getHttpHeaders() .entrySet() .stream() .filter(entry -> !entry.getKey().equals("client-request-id")) .toArray(); } private MultiValueMap<String, String> convertedBodyOf(OAuth2AuthorizationCodeGrantRequest request) { AADOAuth2AuthorizationCodeGrantRequestEntityConverter converter = new AADOAuth2AuthorizationCodeGrantRequestEntityConverter(clientRepo.getAzureClient()); RequestEntity<?> entity = converter.convert(request); return WebApplicationContextRunnerUtils.toMultiValueMap(entity); } private OAuth2AuthorizationCodeGrantRequest createCodeGrantRequest(ClientRegistration client) { return new OAuth2AuthorizationCodeGrantRequest(client, createExchange(client)); } private OAuth2AuthorizationExchange createExchange(ClientRegistration client) { return new OAuth2AuthorizationExchange( createAuthorizationRequest(client), createAuthorizationResponse()); } private OAuth2AuthorizationRequest createAuthorizationRequest(ClientRegistration client) { OAuth2AuthorizationRequest.Builder builder = OAuth2AuthorizationRequest.authorizationCode(); builder.authorizationUri(client.getProviderDetails().getAuthorizationUri()); builder.clientId(client.getClientId()); builder.scopes(client.getScopes()); builder.state("fake-state"); builder.redirectUri("http: return builder.build(); } private OAuth2AuthorizationResponse createAuthorizationResponse() { OAuth2AuthorizationResponse.Builder builder = OAuth2AuthorizationResponse.success("fake-code"); builder.redirectUri("http: builder.state("fake-state"); return builder.build(); } }
Same here.
public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile", "https: }); }
"azure.activedirectory.user-group.allowed-groups = group1, group2"
public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class, INCLUDE_ANCESTORS); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class, INCLUDE_ANCESTORS); assertThat(context).doesNotHaveBean(OAuth2UserService.class, INCLUDE_ANCESTORS); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class, INCLUDE_ANCESTORS); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class, INCLUDE_ANCESTORS); assertThat(context).hasSingleBean(OAuth2UserService.class, INCLUDE_ANCESTORS); }); } @Test @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { 
ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: "https: "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "https: }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "https: ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = 
collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "https: ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "https: "https: "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); 
assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo 
= context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
Seems we can delete all `allowed-groups`.
public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: "https: "https: assertDefaultScopes(graph, "Calendars.Read"); }); }
"azure.activedirectory.user-group.allowed-groups = group1, group2"
public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class, INCLUDE_ANCESTORS); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class, INCLUDE_ANCESTORS); assertThat(context).doesNotHaveBean(OAuth2UserService.class, INCLUDE_ANCESTORS); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class, INCLUDE_ANCESTORS); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class, INCLUDE_ANCESTORS); assertThat(context).hasSingleBean(OAuth2UserService.class, INCLUDE_ANCESTORS); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile", "https: }); } @Test public void clientRequiresPermissionRegistered() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "https: }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "https: ); assertEquals(clientRepo.getAzureClient().getClient(), azure); 
assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read", "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "https: ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", 
"azure.activedirectory.authorization-clients.arm.scopes = https: "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "https: "https: "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void 
defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: 
assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
We'd better add more tests: 1. `client-id` configured, but no `authorization-clients`. 2. `client-id` not configured, but `authorization-clients` configured.
public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); }
}
public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=fake-scope") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( 
AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes 
= graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
Yes, I'm just about to add them.
public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); }
}
public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=fake-scope") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( 
AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes 
= graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
Current tests can not make sure `@ConditionalOnExpression("!'${azure.activedirectory.authorization-clients}'.isEmpty()")` take effect.
public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); }
}
public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=fake-scope") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> { assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"); }); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( 
AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); Set<String> graphScopes 
= graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
`doesNotHaveBean` should use bean name. `AADOAuth2OboAuthorizedClientRepository` is not a bean name. My suggestion is to use ``` doesNotHaveBean(Class<?> type) { ``` instead.
public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository")); }
.run(context -> assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"));
public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository")); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = 
oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test @Test public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph 
= oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
Same here.
public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository")); }
.run(context -> assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository"));
public void testNotExistOAuth2LoginAuthenticationFilter() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(OAuth2LoginAuthenticationFilter.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean("AADOAuth2OboAuthorizedClientRepository")); } @Test @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = 
oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
class AADResourceServerOboConfigurationTest { private final WebApplicationContextRunner contextRunner = new WebApplicationContextRunner() .withPropertyValues( "azure.activedirectory.tenant-id=fake-tenant-id", "azure.activedirectory.client-id=fake-client-id", "azure.activedirectory.client-secret=fake-client-secret"); @Test public void testWithoutAnyPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .run(context -> { assertThat(context).doesNotHaveBean(AADAuthenticationProperties.class); assertThat(context).doesNotHaveBean(ClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testWithRequiredPropertiesSet() { new WebApplicationContextRunner() .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.client-id=fake-client-id") .run(context -> { assertThat(context).hasSingleBean(AADAuthenticationProperties.class); assertThat(context).hasSingleBean(ClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); }); } @Test public void testNotExistBearerTokenAuthenticationToken() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withClassLoader(new FilteredClassLoader(BearerTokenAuthenticationToken.class)) .run(context -> assertThat(context).doesNotHaveBean(AADOAuth2OboAuthorizedClientRepository.class)); } @Test @Test public void testOnlyGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = 
oboRepo.findByRegistrationId("graph"); Set<String> graphScopes = graph.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(graphScopes).containsOnly("https: }); } @Test public void testExistCustomAndGraphClient() { this.contextRunner .withUserConfiguration(AADResourceServerOboConfiguration.class) .withPropertyValues("azure.activedirectory.authorization-clients.graph.scopes=" + "https: .withPropertyValues("azure.activedirectory.authorization-clients.custom.scopes=" + "api: .run(context -> { final InMemoryClientRegistrationRepository oboRepo = context.getBean( InMemoryClientRegistrationRepository.class); final OAuth2AuthorizedClientRepository aadOboRepo = context.getBean( AADOAuth2OboAuthorizedClientRepository.class); ClientRegistration graph = oboRepo.findByRegistrationId("graph"); ClientRegistration custom = oboRepo.findByRegistrationId("custom"); Set<String> graphScopes = graph.getScopes(); Set<String> customScopes = custom.getScopes(); assertThat(aadOboRepo).isNotNull(); assertThat(oboRepo).isExactlyInstanceOf(InMemoryClientRegistrationRepository.class); assertThat(graph).isNotNull(); assertThat(customScopes).isNotNull(); assertThat(graphScopes).containsOnly("https: assertThat(customScopes).containsOnly("api: }); } }
Could these be `equalsIgnoreCase`?
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: DetailAST modifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); classStaticDeque.offer(modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.CTOR_DEF: isInConstructor = true; break; case TokenTypes.METHOD_DEF: DetailAST methodModifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); methodStaticDeque.offer(methodModifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.LITERAL_THROW: if (classStaticDeque.isEmpty() || classStaticDeque.peek() || isInConstructor || methodStaticDeque.isEmpty() || methodStaticDeque.peek()) { return; } DetailAST methodCallToken = token.findFirstToken(TokenTypes.EXPR).findFirstToken(TokenTypes.METHOD_CALL); if (methodCallToken == null) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); return; } String methodCallName = FullIdent.createFullIdent(methodCallToken.findFirstToken(TokenTypes.DOT)).getText(); if (!LOGGER_LOG_EXCEPTION_AS_ERROR.toLowerCase(Locale.ROOT) .equals(methodCallName.toLowerCase(Locale.ROOT)) && !LOGGER_LOG_THROWABLE_AS_ERROR.toLowerCase(Locale.ROOT) .equals(methodCallName.toLowerCase(Locale.ROOT)) && !LOGGER_LOG_EXCEPTION_AS_WARNING.toLowerCase(Locale.ROOT) .equals(methodCallName.toLowerCase(Locale.ROOT)) && !LOGGER_LOG_THROWABLE_AS_WARNING.toLowerCase(Locale.ROOT) .equals(methodCallName.toLowerCase(Locale.ROOT))) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); } break; default: break; } }
if (!LOGGER_LOG_EXCEPTION_AS_ERROR.toLowerCase(Locale.ROOT)
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: DetailAST modifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); classStaticDeque.offer(modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.CTOR_DEF: isInConstructor = true; break; case TokenTypes.METHOD_DEF: DetailAST methodModifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); methodStaticDeque.offer(methodModifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.LITERAL_THROW: if (classStaticDeque.isEmpty() || classStaticDeque.peek() || isInConstructor || methodStaticDeque.isEmpty() || methodStaticDeque.peek()) { return; } DetailAST methodCallToken = token.findFirstToken(TokenTypes.EXPR).findFirstToken(TokenTypes.METHOD_CALL); if (methodCallToken == null) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); return; } String methodCallName = FullIdent.createFullIdent(methodCallToken.findFirstToken(TokenTypes.DOT)).getText(); if (!LOGGER_LOG_EXCEPTION_AS_ERROR.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_THROWABLE_AS_ERROR.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_EXCEPTION_AS_WARNING.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_THROWABLE_AS_WARNING.equalsIgnoreCase(methodCallName)) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); } break; default: break; } }
class is static private final Queue<Boolean> classStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private final Queue<Boolean> methodStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private boolean isInConstructor = false; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class is static private final Queue<Boolean> classStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private final Queue<Boolean> methodStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private boolean isInConstructor = false; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
This change is to allow static logger instances named `LOGGER`
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: DetailAST modifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); classStaticDeque.offer(modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.CTOR_DEF: isInConstructor = true; break; case TokenTypes.METHOD_DEF: DetailAST methodModifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); methodStaticDeque.offer(methodModifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.LITERAL_THROW: if (classStaticDeque.isEmpty() || classStaticDeque.peek() || isInConstructor || methodStaticDeque.isEmpty() || methodStaticDeque.peek()) { return; } DetailAST methodCallToken = token.findFirstToken(TokenTypes.EXPR).findFirstToken(TokenTypes.METHOD_CALL); if (methodCallToken == null) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); return; } String methodCallName = FullIdent.createFullIdent(methodCallToken.findFirstToken(TokenTypes.DOT)).getText(); if (!LOGGER_LOG_EXCEPTION_AS_ERROR.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_THROWABLE_AS_ERROR.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_EXCEPTION_AS_WARNING.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_THROWABLE_AS_WARNING.equalsIgnoreCase(methodCallName)) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); } break; default: break; } }
String methodCallName =
public void visitToken(DetailAST token) { switch (token.getType()) { case TokenTypes.CLASS_DEF: DetailAST modifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); classStaticDeque.offer(modifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.CTOR_DEF: isInConstructor = true; break; case TokenTypes.METHOD_DEF: DetailAST methodModifiersToken = token.findFirstToken(TokenTypes.MODIFIERS); methodStaticDeque.offer(methodModifiersToken.branchContains(TokenTypes.LITERAL_STATIC)); break; case TokenTypes.LITERAL_THROW: if (classStaticDeque.isEmpty() || classStaticDeque.peek() || isInConstructor || methodStaticDeque.isEmpty() || methodStaticDeque.peek()) { return; } DetailAST methodCallToken = token.findFirstToken(TokenTypes.EXPR).findFirstToken(TokenTypes.METHOD_CALL); if (methodCallToken == null) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); return; } String methodCallName = FullIdent.createFullIdent(methodCallToken.findFirstToken(TokenTypes.DOT)).getText(); if (!LOGGER_LOG_EXCEPTION_AS_ERROR.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_THROWABLE_AS_ERROR.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_EXCEPTION_AS_WARNING.equalsIgnoreCase(methodCallName) && !LOGGER_LOG_THROWABLE_AS_WARNING.equalsIgnoreCase(methodCallName)) { log(token, String.format(THROW_LOGGER_EXCEPTION_MESSAGE, LOGGER_LOG_EXCEPTION_AS_ERROR, LOGGER_LOG_THROWABLE_AS_ERROR, LOGGER_LOG_EXCEPTION_AS_WARNING, LOGGER_LOG_THROWABLE_AS_WARNING)); } break; default: break; } }
class is static private final Queue<Boolean> classStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private final Queue<Boolean> methodStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private boolean isInConstructor = false; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
class is static private final Queue<Boolean> classStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private final Queue<Boolean> methodStaticDeque = Collections.asLifoQueue(new ArrayDeque<>()); private boolean isInConstructor = false; @Override public int[] getDefaultTokens() { return getRequiredTokens(); }
Wouldn't you need to get the superset of the FeedRangeDiagnostics.getClientSideRequestStatistics().getRegionsContacted() here instead?
public Set<URI> getRegionsContacted() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getRegionsContacted(); }
return null;
public Set<URI> getRegionsContacted() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getRegionsContacted(); }
class CosmosDiagnostics { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String COSMOS_DIAGNOSTICS_KEY = "cosmosDiagnostics"; private ClientSideRequestStatistics clientSideRequestStatistics; private FeedResponseDiagnostics feedResponseDiagnostics; static final String USER_AGENT = Utils.getUserAgent(); static final String USER_AGENT_KEY = "userAgent"; CosmosDiagnostics(DiagnosticsClientContext diagnosticsClientContext) { this.clientSideRequestStatistics = new ClientSideRequestStatistics(diagnosticsClientContext); } CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } ClientSideRequestStatistics clientSideRequestStatistics() { return clientSideRequestStatistics; } CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { this.clientSideRequestStatistics = clientSideRequestStatistics; return this; } /** * Retrieves Response Diagnostic String * * @return Response Diagnostic String */ @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); fillCosmosDiagnostics(null, stringBuilder); return stringBuilder.toString(); } /** * Retrieves duration related to the completion of the request. * This represents end to end duration of an operation including all the retries. * This is meant for point operation only, for query please use toString() to get full query diagnostics. 
* * @return request completion duration */ public Duration getDuration() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getDuration(); } /** * Regions contacted for this request * @return set of regions contacted for this request */ @Beta(value = Beta.SinceVersion.V4_9_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) FeedResponseDiagnostics getFeedResponseDiagnostics() { return feedResponseDiagnostics; } void fillCosmosDiagnostics(ObjectNode parentNode, StringBuilder stringBuilder) { if (this.feedResponseDiagnostics != null) { if (parentNode != null) { parentNode.put(USER_AGENT_KEY, USER_AGENT); parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, feedResponseDiagnostics); } if (stringBuilder != null) { stringBuilder.append(USER_AGENT_KEY +"=").append(USER_AGENT).append(System.lineSeparator()); stringBuilder.append(feedResponseDiagnostics); } } else { if (parentNode != null) { parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, clientSideRequestStatistics); } if (stringBuilder != null) { try { stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics)); } catch (JsonProcessingException e) { LOGGER.error("Error while parsing diagnostics ", e); } } } } void setFeedResponseDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } }
class CosmosDiagnostics { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String COSMOS_DIAGNOSTICS_KEY = "cosmosDiagnostics"; private ClientSideRequestStatistics clientSideRequestStatistics; private FeedResponseDiagnostics feedResponseDiagnostics; static final String USER_AGENT = Utils.getUserAgent(); static final String USER_AGENT_KEY = "userAgent"; CosmosDiagnostics(DiagnosticsClientContext diagnosticsClientContext) { this.clientSideRequestStatistics = new ClientSideRequestStatistics(diagnosticsClientContext); } CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } ClientSideRequestStatistics clientSideRequestStatistics() { return clientSideRequestStatistics; } CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { this.clientSideRequestStatistics = clientSideRequestStatistics; return this; } /** * Retrieves Response Diagnostic String * * @return Response Diagnostic String */ @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); fillCosmosDiagnostics(null, stringBuilder); return stringBuilder.toString(); } /** * Retrieves duration related to the completion of the request. * This represents end to end duration of an operation including all the retries. * This is meant for point operation only, for query please use toString() to get full query diagnostics. 
* * @return request completion duration */ public Duration getDuration() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getDuration(); } /** * Regions contacted for this request * @return set of regions contacted for this request */ @Beta(value = Beta.SinceVersion.V4_9_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) FeedResponseDiagnostics getFeedResponseDiagnostics() { return feedResponseDiagnostics; } void fillCosmosDiagnostics(ObjectNode parentNode, StringBuilder stringBuilder) { if (this.feedResponseDiagnostics != null) { if (parentNode != null) { parentNode.put(USER_AGENT_KEY, USER_AGENT); parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, feedResponseDiagnostics); } if (stringBuilder != null) { stringBuilder.append(USER_AGENT_KEY +"=").append(USER_AGENT).append(System.lineSeparator()); stringBuilder.append(feedResponseDiagnostics); } } else { if (parentNode != null) { parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, clientSideRequestStatistics); } if (stringBuilder != null) { try { stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics)); } catch (JsonProcessingException e) { LOGGER.error("Error while parsing diagnostics ", e); } } } } void setFeedResponseDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } }
Aggregating regions contacted from all list of clientSideRequestStatistics needs to be done. I will do this in subsequent PRs
public Set<URI> getRegionsContacted() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getRegionsContacted(); }
return null;
public Set<URI> getRegionsContacted() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getRegionsContacted(); }
class CosmosDiagnostics { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String COSMOS_DIAGNOSTICS_KEY = "cosmosDiagnostics"; private ClientSideRequestStatistics clientSideRequestStatistics; private FeedResponseDiagnostics feedResponseDiagnostics; static final String USER_AGENT = Utils.getUserAgent(); static final String USER_AGENT_KEY = "userAgent"; CosmosDiagnostics(DiagnosticsClientContext diagnosticsClientContext) { this.clientSideRequestStatistics = new ClientSideRequestStatistics(diagnosticsClientContext); } CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } ClientSideRequestStatistics clientSideRequestStatistics() { return clientSideRequestStatistics; } CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { this.clientSideRequestStatistics = clientSideRequestStatistics; return this; } /** * Retrieves Response Diagnostic String * * @return Response Diagnostic String */ @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); fillCosmosDiagnostics(null, stringBuilder); return stringBuilder.toString(); } /** * Retrieves duration related to the completion of the request. * This represents end to end duration of an operation including all the retries. * This is meant for point operation only, for query please use toString() to get full query diagnostics. 
* * @return request completion duration */ public Duration getDuration() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getDuration(); } /** * Regions contacted for this request * @return set of regions contacted for this request */ @Beta(value = Beta.SinceVersion.V4_9_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) FeedResponseDiagnostics getFeedResponseDiagnostics() { return feedResponseDiagnostics; } void fillCosmosDiagnostics(ObjectNode parentNode, StringBuilder stringBuilder) { if (this.feedResponseDiagnostics != null) { if (parentNode != null) { parentNode.put(USER_AGENT_KEY, USER_AGENT); parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, feedResponseDiagnostics); } if (stringBuilder != null) { stringBuilder.append(USER_AGENT_KEY +"=").append(USER_AGENT).append(System.lineSeparator()); stringBuilder.append(feedResponseDiagnostics); } } else { if (parentNode != null) { parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, clientSideRequestStatistics); } if (stringBuilder != null) { try { stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics)); } catch (JsonProcessingException e) { LOGGER.error("Error while parsing diagnostics ", e); } } } } void setFeedResponseDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } }
class CosmosDiagnostics { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosDiagnostics.class); private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); private static final String COSMOS_DIAGNOSTICS_KEY = "cosmosDiagnostics"; private ClientSideRequestStatistics clientSideRequestStatistics; private FeedResponseDiagnostics feedResponseDiagnostics; static final String USER_AGENT = Utils.getUserAgent(); static final String USER_AGENT_KEY = "userAgent"; CosmosDiagnostics(DiagnosticsClientContext diagnosticsClientContext) { this.clientSideRequestStatistics = new ClientSideRequestStatistics(diagnosticsClientContext); } CosmosDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } ClientSideRequestStatistics clientSideRequestStatistics() { return clientSideRequestStatistics; } CosmosDiagnostics clientSideRequestStatistics(ClientSideRequestStatistics clientSideRequestStatistics) { this.clientSideRequestStatistics = clientSideRequestStatistics; return this; } /** * Retrieves Response Diagnostic String * * @return Response Diagnostic String */ @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); fillCosmosDiagnostics(null, stringBuilder); return stringBuilder.toString(); } /** * Retrieves duration related to the completion of the request. * This represents end to end duration of an operation including all the retries. * This is meant for point operation only, for query please use toString() to get full query diagnostics. 
* * @return request completion duration */ public Duration getDuration() { if (this.feedResponseDiagnostics != null) { return null; } return this.clientSideRequestStatistics.getDuration(); } /** * Regions contacted for this request * @return set of regions contacted for this request */ @Beta(value = Beta.SinceVersion.V4_9_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) FeedResponseDiagnostics getFeedResponseDiagnostics() { return feedResponseDiagnostics; } void fillCosmosDiagnostics(ObjectNode parentNode, StringBuilder stringBuilder) { if (this.feedResponseDiagnostics != null) { if (parentNode != null) { parentNode.put(USER_AGENT_KEY, USER_AGENT); parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, feedResponseDiagnostics); } if (stringBuilder != null) { stringBuilder.append(USER_AGENT_KEY +"=").append(USER_AGENT).append(System.lineSeparator()); stringBuilder.append(feedResponseDiagnostics); } } else { if (parentNode != null) { parentNode.putPOJO(COSMOS_DIAGNOSTICS_KEY, clientSideRequestStatistics); } if (stringBuilder != null) { try { stringBuilder.append(OBJECT_MAPPER.writeValueAsString(this.clientSideRequestStatistics)); } catch (JsonProcessingException e) { LOGGER.error("Error while parsing diagnostics ", e); } } } } void setFeedResponseDiagnostics(FeedResponseDiagnostics feedResponseDiagnostics) { this.feedResponseDiagnostics = feedResponseDiagnostics; } }
Please add logic about slash. Refs: https://github.com/Azure/azure-sdk-for-java/blob/22aa07f799d07a885c15e7bda917ef5329d96f40/sdk/spring/azure-spring-boot/src/main/java/com/azure/spring/aad/AADAuthorizationServerEndpoints.java#L35
public void setGraphBaseUri(String graphBaseUri) { this.graphBaseUri = graphBaseUri; }
this.graphBaseUri = graphBaseUri;
public void setGraphBaseUri(String graphBaseUri) { this.graphBaseUri = graphBaseUri; }
class UserGroupProperties { /** * Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph * API Call. */ private List<String> allowedGroups = new ArrayList<>(); public List<String> getAllowedGroups() { return allowedGroups; } public void setAllowedGroups(List<String> allowedGroups) { this.allowedGroups = allowedGroups; } }
class UserGroupProperties { /** * Expected UserGroups that an authority will be granted to if found in the response from the MemeberOf Graph * API Call. */ private List<String> allowedGroups = new ArrayList<>(); public List<String> getAllowedGroups() { return allowedGroups; } public void setAllowedGroups(List<String> allowedGroups) { this.allowedGroups = allowedGroups; } }
We should do something with the context.
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(),"https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { }); }
.run(context -> {
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... 
expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
And it's better to move this case to line 234.
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(),"https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { }); }
.run(context -> {
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... 
expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
Please add another test with: ``` .withPropertyValues( "azure.activedirectory.graph-membership-uri=https://microsoftgraph.chinacloudapi.cn/v1.0/me/memberOf" ) ```
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(),"https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { }); }
.run(context -> {
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... 
expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
Please add another test with: ``` .withPropertyValues( "azure.activedirectory.graph-base-uri=https://microsoftgraph.chinacloudapi.cn/" ) ```
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(),"https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(),"https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { }); }
)
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.user-group.allowed-groups = group1, group2" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... 
expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
Move this test case to line 305.
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphMembershipUri(), "https: }); }
"azure.activedirectory.graph-membership-uri=https:
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes =" + " https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
We need 2 `assertEquals` for every test case.
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphMembershipUri(), "https: }); }
"https:
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes =" + " https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
We still need the test cause which will throw IllegalStateException.
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphMembershipUri(), "https: }); }
"azure.activedirectory.graph-membership-uri=https:
public void graphUriConfigurationTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-base-uri=https: "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); assertEquals(properties.getGraphBaseUri(), "https: assertEquals(properties.getGraphMembershipUri(), "https: }); }
class AADWebAppConfigurationTest { @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = 
context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = 
clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints("http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void clientRequiresOnDemandPermissions() { 
WebApplicationContextRunnerUtils.getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes =" + " https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } @Test private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
class AADWebAppConfigurationTest { @Test public void aadAwareClientRepository() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access" ); assertEquals(clientRepo.getAzureClient().getClient(), azure); assertFalse(clientRepo.isClientNeedConsentWhenLogin(azure)); assertTrue(clientRepo.isClientNeedConsentWhenLogin(graph)); assertFalse(clientRepo.isClientNeedConsentWhenLogin("azure")); assertTrue(clientRepo.isClientNeedConsentWhenLogin("graph")); List<ClientRegistration> clients = collectClients(clientRepo); assertEquals(1, clients.size()); assertEquals("azure", clients.get(0).getRegistrationId()); }); } @Test public void clientRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertNotNull(azure); assertEquals("fake-client-id", azure.getClientId()); assertEquals("fake-client-secret", azure.getClientSecret()); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "https: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); assertEquals("{baseUrl}/login/oauth2/code/{registrationId}", azure.getRedirectUriTemplate()); 
assertDefaultScopes(azure, "openid", "profile"); }); } @Test public void clientRequiresMultiPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertDefaultScopes( azure, "openid", "profile", "offline_access", "Calendars.Read", "https: assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void clientRequiresOnDemandPermissions() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read", "azure.activedirectory.authorization-clients.graph.on-demand = true", "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = repo.findByRegistrationId("azure"); ClientRegistration graph = repo.findByRegistrationId("graph"); ClientRegistration arm = repo.findByRegistrationId("arm"); assertNotNull(azure); assertDefaultScopes( azure, "openid", "profile", "offline_access", "https: assertFalse(repo.isClientNeedConsentWhenLogin(graph)); assertTrue(repo.isClientNeedConsentWhenLogin(arm)); assertFalse(repo.isClientNeedConsentWhenLogin("graph")); assertTrue(repo.isClientNeedConsentWhenLogin("arm")); }); } @Test public void clientRequiresPermissionInDefaultClient() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) 
.run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); }); } @Test public void clientRequiresPermissionRegistered() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.graph.scopes = Calendars.Read" ) .run(context -> { ClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); ClientRegistration graph = clientRepo.findByRegistrationId("graph"); assertNotNull(azure); assertNotNull(graph); assertDefaultScopes(azure, "openid", "profile", "offline_access", "Calendars.Read"); assertDefaultScopes(graph, "Calendars.Read"); }); } @Test public void configurationOnRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .run(context -> { assertThat(context).hasSingleBean(AADWebAppClientRegistrationRepository.class); assertThat(context).hasSingleBean(OAuth2AuthorizedClientRepository.class); assertThat(context).hasSingleBean(OAuth2UserService.class); }); } @Test public void customizeUri() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.base-uri = http: ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); ClientRegistration azure = clientRepo.findByRegistrationId("azure"); AADAuthorizationServerEndpoints endpoints = new AADAuthorizationServerEndpoints( "http: assertEquals(endpoints.authorizationEndpoint(), azure.getProviderDetails().getAuthorizationUri()); assertEquals(endpoints.tokenEndpoint(), azure.getProviderDetails().getTokenUri()); 
assertEquals(endpoints.jwkSetEndpoint(), azure.getProviderDetails().getJwkSetUri()); }); } @Test public void defaultClientWithAuthzScope() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties().withPropertyValues( "azure.activedirectory.authorization-clients.azure.scopes = Calendars.Read" ) .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "offline_access", "Calendars.Read" ); }); } @Test @Test(expected = IllegalStateException.class) public void graphUriConfigurationWithExceptionTest() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.graph-membership-uri=https: ) .run(context -> { AADAuthenticationProperties properties = context.getBean(AADAuthenticationProperties.class); }); } @Test public void groupConfiguration() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues("azure.activedirectory.user-group.allowed-groups = group1, group2") .run(context -> { AADWebAppClientRegistrationRepository clientRepo = context.getBean(AADWebAppClientRegistrationRepository.class); assertDefaultScopes( clientRepo.getAzureClient(), "openid", "profile", "https: "https: ); }); } @Test public void haveResourceServerScopeInAccessTokenWhenThereAreMultiResourceServerScopesInAuthCode() { WebApplicationContextRunnerUtils .getContextRunnerWithRequiredProperties() .withPropertyValues( "azure.activedirectory.authorization-clients.office.scopes = " + "https: "azure.activedirectory.authorization-clients.arm.scopes = " + "https: ) .run(context -> { AADWebAppClientRegistrationRepository repo = context.getBean(AADWebAppClientRegistrationRepository.class); AzureClientRegistration azure = repo.getAzureClient(); assertNotNull(azure); int resourceServerCountInAuthCode = 
resourceServerCount(azure.getClient().getScopes()); assertTrue(resourceServerCountInAuthCode > 1); int resourceServerCountInAccessToken = resourceServerCount(azure.getAccessTokenScopes()); assertTrue(resourceServerCountInAccessToken != 0); }); } @Test public void noConfigurationOnMissingRequiredProperties() { WebApplicationContextRunnerUtils .getContextRunner() .run(context -> { assertThat(context).doesNotHaveBean(AADWebAppClientRegistrationRepository.class); assertThat(context).doesNotHaveBean(OAuth2AuthorizedClientRepository.class); assertThat(context).doesNotHaveBean(OAuth2UserService.class); }); } @Test public void resourceServerCountTest() { Set<String> scopes = new HashSet<>(); assertEquals(resourceServerCount(scopes), 0); scopes.add("openid"); scopes.add("profile"); scopes.add("offline_access"); assertEquals(resourceServerCount(scopes), 0); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 1); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); scopes.add("https: assertEquals(resourceServerCount(scopes), 2); } private void assertDefaultScopes(ClientRegistration client, String... scopes) { assertEquals(scopes.length, client.getScopes().size()); for (String s : scopes) { assertTrue(client.getScopes().contains(s)); } } private void assertDefaultScopes(AzureClientRegistration client, String... expected) { assertEquals(expected.length, client.getAccessTokenScopes().size()); for (String e : expected) { assertTrue(client.getAccessTokenScopes().contains(e)); } } private List<ClientRegistration> collectClients(Iterable<ClientRegistration> itr) { List<ClientRegistration> result = new ArrayList<>(); itr.forEach(result::add); return result; } }
Should we log it, if there being new type later?
private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl)manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphGroupInner groupInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { return null; } } catch (IOException e) { return null; } } } return null; }
return null;
private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl) manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphGroupInner groupInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { logger.warning("Can't recognize member type '{}' of ActiveDirectoryGroup", odataType); return null; } } catch (IOException e) { logger.logThrowableAsWarning(e); return null; } } } return null; }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() 
.thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add(new HashMap<>() {{ put(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId)); }}); return this; } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private final ClientLogger logger = new ClientLogger(getClass()); private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> 
manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add( Collections.singletonMap(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId))); return this; } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
Maybe `Collections.singletonMap` would be better (if it is immutable)? `{{` seems to give a subclass of `HashMap`.
public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add(new HashMap<>() {{ put(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId)); }}); return this; }
membersToAdd.add(new HashMap<>() {{
public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add( Collections.singletonMap(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId))); return this; }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl)manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphGroupInner groupInner = 
serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { return null; } } catch (IOException e) { return null; } } } return null; } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public 
ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private final ClientLogger logger = new ClientLogger(getClass()); private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl) manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if 
(odataType.endsWith(" MicrosoftGraphGroupInner groupInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { logger.warning("Can't recognize member type '{}' of ActiveDirectoryGroup", odataType); return null; } } catch (IOException e) { logger.logThrowableAsWarning(e); return null; } } } return null; } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> 
manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
done
private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl)manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphGroupInner groupInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { return null; } } catch (IOException e) { return null; } } } return null; }
return null;
private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl) manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphGroupInner groupInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { logger.warning("Can't recognize member type '{}' of ActiveDirectoryGroup", odataType); return null; } } catch (IOException e) { logger.logThrowableAsWarning(e); return null; } } } return null; }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() 
.thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add(new HashMap<>() {{ put(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId)); }}); return this; } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private final ClientLogger logger = new ClientLogger(getClass()); private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> 
manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add( Collections.singletonMap(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId))); return this; } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
done
public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add(new HashMap<>() {{ put(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId)); }}); return this; }
membersToAdd.add(new HashMap<>() {{
public ActiveDirectoryGroupImpl withMember(String objectId) { String membersKey = "@odata.id"; membersToAdd.add( Collections.singletonMap(membersKey, String.format("%s/directoryObjects/%s", manager().serviceClient().getEndpoint(), objectId))); return this; }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl)manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphGroupInner groupInner = 
serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { return null; } } catch (IOException e) { return null; } } } return null; } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public 
ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }
class ActiveDirectoryGroupImpl extends CreatableUpdatableImpl<ActiveDirectoryGroup, MicrosoftGraphGroupInner, ActiveDirectoryGroupImpl> implements ActiveDirectoryGroup, ActiveDirectoryGroup.Definition, ActiveDirectoryGroup.Update { private final AuthorizationManager manager; private final ClientLogger logger = new ClientLogger(getClass()); private Set<Map<String, Object>> membersToAdd; private Set<String> membersToRemove; ActiveDirectoryGroupImpl(MicrosoftGraphGroupInner innerModel, AuthorizationManager manager) { super(innerModel.displayName(), innerModel); this.manager = manager; membersToAdd = new HashSet<>(); membersToRemove = new HashSet<>(); } @Override public boolean securityEnabled() { return ResourceManagerUtils.toPrimitiveBoolean(innerModel().securityEnabled()); } @Override public String mail() { return innerModel().mail(); } @Override public List<ActiveDirectoryObject> listMembers() { return listMembersAsync().collectList().block(); } @Override public PagedFlux<ActiveDirectoryObject> listMembersAsync() { return PagedConverter.flatMapPage(manager() .serviceClient() .getGroups() .listMembersAsync(id()), directoryObjectInner -> Mono.justOrEmpty(parseDirectoryObject(directoryObjectInner)) ); } private ActiveDirectoryObject parseDirectoryObject(MicrosoftGraphDirectoryObjectInner inner) { if (inner.additionalProperties() != null) { Object odataTypeObject = inner.additionalProperties().get("@odata.type"); if (odataTypeObject instanceof String) { SerializerAdapter serializerAdapter = ((MicrosoftGraphClientImpl) manager().serviceClient()).getSerializerAdapter(); String odataType = ((String) odataTypeObject).toLowerCase(Locale.ROOT); try { String jsonString = serializerAdapter.serialize(inner, SerializerEncoding.JSON); if (odataType.endsWith(" MicrosoftGraphUserInner userInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphUserInner.class, SerializerEncoding.JSON); return new ActiveDirectoryUserImpl(userInner, manager()); } else if 
(odataType.endsWith(" MicrosoftGraphGroupInner groupInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphGroupInner.class, SerializerEncoding.JSON); return new ActiveDirectoryGroupImpl(groupInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphServicePrincipalInner servicePrincipalInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphServicePrincipalInner.class, SerializerEncoding.JSON); return new ServicePrincipalImpl(servicePrincipalInner, manager()); } else if (odataType.endsWith(" MicrosoftGraphApplicationInner applicationInner = serializerAdapter.deserialize( jsonString, MicrosoftGraphApplicationInner.class, SerializerEncoding.JSON); return new ActiveDirectoryApplicationImpl(applicationInner, manager()); } else { logger.warning("Can't recognize member type '{}' of ActiveDirectoryGroup", odataType); return null; } } catch (IOException e) { logger.logThrowableAsWarning(e); return null; } } } return null; } @Override protected Mono<MicrosoftGraphGroupInner> getInnerAsync() { return manager().serviceClient().getGroupsGroups().getGroupAsync(id()); } @Override public boolean isInCreateMode() { return id() == null; } @Override public Mono<ActiveDirectoryGroup> createResourceAsync() { Mono<ActiveDirectoryGroup> group = Mono.just(this); if (isInCreateMode()) { if (innerModel().mailEnabled() == null) { innerModel().withMailEnabled(false); } if (innerModel().securityEnabled() == null) { innerModel().withSecurityEnabled(true); } group = manager().serviceClient().getGroupsGroups().createGroupAsync(innerModel()) .map(innerToFluentMap(this)); } if (!membersToRemove.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToRemove) .flatMap(s -> manager().serviceClient().getGroups().deleteRefMemberAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToRemove.clear())); } if (!membersToAdd.isEmpty()) { group = group .flatMap( o -> Flux .fromIterable(membersToAdd) .flatMap(s -> 
manager().serviceClient().getGroups().createRefMembersAsync(id(), s)) .singleOrEmpty() .thenReturn(this) .doFinally(signalType -> membersToAdd.clear())); } return group; } @Override public ActiveDirectoryGroupImpl withEmailAlias(String mailNickname) { if (mailNickname.contains("@")) { String[] parts = mailNickname.split("@"); mailNickname = parts[0]; } innerModel().withMailNickname(mailNickname); return this; } @Override @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryUser user) { return withMember(user.id()); } @Override public ActiveDirectoryGroupImpl withMember(ActiveDirectoryGroup group) { return withMember(group.id()); } @Override public ActiveDirectoryGroupImpl withMember(ServicePrincipal servicePrincipal) { return withMember(servicePrincipal.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(String objectId) { membersToRemove.add(objectId); return this; } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryUser user) { return withoutMember(user.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ActiveDirectoryGroup group) { return withoutMember(group.id()); } @Override public ActiveDirectoryGroupImpl withoutMember(ServicePrincipal servicePrincipal) { return withoutMember(servicePrincipal.id()); } @Override public String id() { return innerModel().id(); } @Override public AuthorizationManager manager() { return this.manager; } }