comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Does `resourceLoader` use `blobServiceAsyncClient` to sent request?
protected void doHealthCheck(Health.Builder builder) { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); } else { builder.withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); try { resourceLoader.getResource("azure-blob: builder.up(); } catch (Exception e) { builder.down(); } } }
resourceLoader.getResource("azure-blob:
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private final ResourceLoader resourceLoader; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * * @param blobServiceAsyncClient the blob service client * @param resourceLoader the resource loader */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient, ResourceLoader resourceLoader) { this.blobServiceAsyncClient = blobServiceAsyncClient; this.resourceLoader = resourceLoader; } @Override }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
And in this case, `builder` will have no `status`. Is this expected behavior?
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); } if (response == null) { builder.down(); } else { builder.up().withDetail("database", response.getProperties().getId()); } }catch (Exception e) { if (e instanceof NotFoundException) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } else { throw e; } } } }
throw e;
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
why not use the sdk api directly?
protected void doHealthCheck(Health.Builder builder) { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); } else { builder.withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); try { resourceLoader.getResource("azure-blob: builder.up(); } catch (Exception e) { builder.down(); } } }
resourceLoader.getResource("azure-blob:
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private final ResourceLoader resourceLoader; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * * @param blobServiceAsyncClient the blob service client * @param resourceLoader the resource loader */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient, ResourceLoader resourceLoader) { this.blobServiceAsyncClient = blobServiceAsyncClient; this.resourceLoader = resourceLoader; } @Override }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
This place is to refer: KeyVaultCertificateHealthIndicator, ``` @Override protected void doHealthCheck(Health.Builder builder) { try { this.certificateAsyncClient.getCertificateWithResponse("spring-cloud-azure-not-existing-certificate") .block(timeout); builder.up(); } catch (Exception e) { if (e instanceof ResourceNotFoundException) { builder.up(); } else { throw e; } } } ``` I will refactor this.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); } if (response == null) { builder.down(); } else { builder.up().withDetail("database", response.getProperties().getId()); } }catch (Exception e) { if (e instanceof NotFoundException) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } else { throw e; } } } }
throw e;
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Context is immutable.
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
return CoreUtils.mergeContexts(this.getContext(), context);
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
Can we use the `Status.UNKNOWN` and put this information in the builder.withDetail, just like how https://github.com/Azure/azure-sdk-for-java/blob/5bc550c9a5de4f8ee93a5b3141500ee34be3850d/sdk/spring/spring-cloud-azure-actuator/src/main/java/com/azure/spring/cloud/actuator/eventhubs/EventHubsHealthIndicator.java#L39 does.
protected void doHealthCheck(Builder builder) throws Exception { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", response.getProperties().getId()); } else { builder.down(); } } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } } }
builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!");
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
How about letting `LOGGER.info(...)` and `builder.up().withDetail()` output the same information. Use `database` instead of `response.getProperties().getId()`.
protected void doHealthCheck(Builder builder) throws Exception { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", response.getProperties().getId()); } else { builder.down(); } } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } } }
builder.up().withDetail("database", response.getProperties().getId());
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
How about simplify ``` } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } ``` to ``` } catch (NotFoundException e) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } ```
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); } if (response == null) { builder.down(); } else { builder.up().withDetail("database", response.getProperties().getId()); } }catch (Exception e) { if (e instanceof NotFoundException) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } else { throw e; } } } }
throw e;
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Is this the only place that merges context?
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
return CoreUtils.mergeContexts(this.getContext(), context);
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
yes
protected void doHealthCheck(Builder builder) throws Exception { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", response.getProperties().getId()); } else { builder.down(); } } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } } }
builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!");
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
yes
protected void doHealthCheck(Builder builder) throws Exception { if (database == null) { builder.status("The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", response.getProperties().getId()); } else { builder.down(); } } catch (Exception e) { if (e instanceof NotFoundException) { builder.up().withDetail("database", "The option of `spring.cloud.azure.cosmos.database` is not " + "configured correctly!"); } else { throw e; } } } }
builder.up().withDetail("database", response.getProperties().getId());
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
It's better to put `builder.up()` at the beginning.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
builder.up().withDetail("database", database);
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
How about doing like this: ```java builder.up() .withDetail("", "") .withDetail("", ""); ``` Same to all other places.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
builder.up().withDetail("database", database);
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
I would prefer to return in this if
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
} else {
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
yes, this looks better.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
builder.up().withDetail("database", database);
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
add a `return` in this block will bring a warn for this method return is void. Means this is unnessary.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
} else {
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
How about return builder.build()? Let's discuss offline.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); } else { try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.withDetail("RUs", response.getRequestCharge()); builder.withDetail("CosmosUri", endpoint); builder.up().withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } } }
} else {
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
I don't think we should catch `NotFoundException` here, the Javadoc of `certificateAsyncClient.getCertificateWithResponse("spring-cloud-azure-not-existing-certificate")` says `@throws ResourceNotFoundException when a certificate with {@code certificateName} doesn't exist in the key vault.`, but there's no such thing for the `cosmosAsyncClient.getDatabase(database).read()`, and `NotFoundException` is from the implementation package.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } try { CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } } catch (NotFoundException e) { builder.status(Status.UNKNOWN).withDetail("Database not found", "The option of `spring.cloud.azure.cosmos.database` is not configured correctly!"); } }
} catch (NotFoundException e) {
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Is it enough for us to construct a `getBlobContainerAsyncClient`, do we really need to construct a `BlobAsyncClient`?
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } try { BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( "spring-cloud-azure-not-existing-container"); BlobAsyncClient blobAsyncClient = containerAsyncClient.getBlobAsyncClient( "spring-cloud-azure-not-existing-blob"); builder.withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); BlobRange range = new BlobRange(0, (long) 2); DownloadRetryOptions options = new DownloadRetryOptions().setMaxRetryRequests(3); Mono<BlobDownloadAsyncResponse> response = blobAsyncClient.downloadStreamWithResponse( range, options, null, false); response.block(timeout); builder.up(); } catch (BlobStorageException e) { builder.up(); } }
"spring-cloud-azure-not-existing-blob");
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Let's keep it as a `database`, I am not sure there will someone who uses this detailed `database` in their monitoring system, so this would be like a public API.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("Database", database); } else { builder.down(); } }
.withDetail("Database", database);
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Please don't use `assert` in the source code.
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); assert exists != null; builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
assert exists != null;
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
by default the jvm assertion is turned off
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); assert exists != null; builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
assert exists != null;
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Yes, this is to be changed to the following instead: ```java Objects.requireNonNull(exists, "Error occurred checking the container existence!"); ```
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); assert exists != null; builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
assert exists != null;
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
can we start by exposing as few as possible details? Let's only keep the URL_FIELD
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); Objects.requireNonNull(exists, "Error occurred checking the container existence!"); builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
.withDetail("statusCode", exists.getStatusCode());
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Should we use the URL_FIELD here?
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } }
.withDetail("CosmosUri", endpoint)
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
Let's remove RUs for now, and we can add it if there's a customer ask.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } }
.withDetail("RUs", response.getRequestCharge())
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
URL_FIELD is not in this package and not accessible in cosmos package. I used endpoint here. It is defined in the same package.
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } }
.withDetail("CosmosUri", endpoint)
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
OK
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response != null) { LOGGER.info("The health indicator cost {} RUs, cosmos uri: {}, dbName: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail("RUs", response.getRequestCharge()) .withDetail("CosmosUri", endpoint) .withDetail("database", database); } else { builder.down(); } }
.withDetail("RUs", response.getRequestCharge())
protected void doHealthCheck(Builder builder) { if (database == null) { builder.status(Status.UNKNOWN).withDetail("Database not configured", "The option of `spring.cloud.azure.cosmos.database` is not configured!"); return; } CosmosDatabaseResponse response = this.cosmosAsyncClient.getDatabase(database) .read() .block(timeout); if (response == null) { throw new RuntimeException("Error occurred checking the database!"); } LOGGER.info("The health indicator cost {} RUs, endpoint: {}, database: {}", response.getRequestCharge(), endpoint, database); builder.up() .withDetail(DATA_BASE_FIELD, database); }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class CosmosHealthIndicator extends AbstractHealthIndicator { private static final Logger LOGGER = LoggerFactory.getLogger(CosmosHealthIndicator.class); private final CosmosAsyncClient cosmosAsyncClient; private final String database; private final String endpoint; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link CosmosHealthIndicator}. * * @param cosmosAsyncClient the cosmosAsyncClient * @param database database name * @param endpoint cosmos endpoint */ public CosmosHealthIndicator(CosmosAsyncClient cosmosAsyncClient, String database, String endpoint) { super("Cosmos health check failed"); Assert.notNull(cosmosAsyncClient, "CosmosClient must not be null"); this.cosmosAsyncClient = cosmosAsyncClient; this.database = database; this.endpoint = endpoint; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
OK. will revert.
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); Objects.requireNonNull(exists, "Error occurred checking the container existence!"); builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()) .withDetail("containerName", NOT_EXISTING_CONTAINER) .withDetail("exists", exists.getValue()) .withDetail("statusCode", exists.getStatusCode()); }
.withDetail("statusCode", exists.getStatusCode());
protected void doHealthCheck(Health.Builder builder) throws Exception { if (blobServiceAsyncClient == null) { builder.status(NOT_CONFIGURED_STATUS); return; } BlobContainerAsyncClient containerAsyncClient = blobServiceAsyncClient.getBlobContainerAsyncClient( NOT_EXISTING_CONTAINER); Response<Boolean> exists = containerAsyncClient.existsWithResponse().block(timeout); if (exists == null) { throw new RuntimeException("Error occurred checking the container existence!"); } builder.up() .withDetail(URL_FIELD, blobServiceAsyncClient.getAccountUrl()); }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
class StorageBlobHealthIndicator extends AbstractHealthIndicator { private final BlobServiceAsyncClient blobServiceAsyncClient; private Duration timeout = DEFAULT_HEALTH_CHECK_TIMEOUT; /** * Creates a new instance of {@link StorageBlobHealthIndicator}. * @param blobServiceAsyncClient the blob service client */ public StorageBlobHealthIndicator(BlobServiceAsyncClient blobServiceAsyncClient) { this.blobServiceAsyncClient = blobServiceAsyncClient; } @Override /** * Set health check request timeout. * * @param timeout the duration value. */ public void setTimeout(Duration timeout) { this.timeout = timeout; } }
That is all, AFAIK. Lite would be changed in codegen https://github.com/Azure/autorest.java/pull/1445
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
return CoreUtils.mergeContexts(this.getContext(), context);
public Context mergeContext(Context context) { return CoreUtils.mergeContexts(this.getContext(), context); }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
class AzureServiceClient { private final ClientLogger logger = new ClientLogger(getClass()); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure.properties"); private static final String SDK_VERSION; static { SDK_VERSION = PROPERTIES.getOrDefault("version", "UnknownVersion"); } private final SerializerAdapter serializerAdapter; private final HttpPipeline httpPipeline; private final String sdkName; /** * Creates a new instance of {@link AzureServiceClient}. * * @param httpPipeline The HttpPipline used by the client. * @param serializerAdapter The SerializerAdapter used by the client. * @param environment The AzureEnvironment used by the client. */ protected AzureServiceClient(HttpPipeline httpPipeline, SerializerAdapter serializerAdapter, AzureEnvironment environment) { this.httpPipeline = httpPipeline; this.serializerAdapter = serializerAdapter; String packageName = this.getClass().getPackage().getName(); String implementationSegment = ".implementation"; if (packageName.endsWith(implementationSegment)) { packageName = packageName.substring(0, packageName.length() - implementationSegment.length()); } this.sdkName = packageName; } /** * Gets serializer adapter for JSON serialization/de-serialization. * * @return the serializer adapter. */ private SerializerAdapter getSerializerAdapter() { return this.serializerAdapter; } /** * Gets The HTTP pipeline to send requests through. * * @return the httpPipeline value. */ public HttpPipeline getHttpPipeline() { return this.httpPipeline; } /** * Gets The default poll interval for long-running operation. * * @return the defaultPollInterval value. */ public abstract Duration getDefaultPollInterval(); /** * Gets default client context. * * @return the default client context. */ public Context getContext() { return new Context("Sdk-Name", sdkName) .addData("Sdk-Version", SDK_VERSION); } /** * Merges default client context with provided context. 
* * @param context the context to be merged with default client context. * @return the merged context. */ /** * Gets long running operation result. * * @param lroInit the raw response of init operation. * @param httpPipeline the http pipeline. * @param pollResultType type of poll result. * @param finalResultType type of final result. * @param context the context shared by all requests. * @param <T> type of poll result. * @param <U> type of final result. * @return poller flux for poll result and final result. */ public <T, U> PollerFlux<PollResult<T>, U> getLroResult(Mono<Response<Flux<ByteBuffer>>> lroInit, HttpPipeline httpPipeline, Type pollResultType, Type finalResultType, Context context) { return PollerFactory.create( getSerializerAdapter(), httpPipeline, pollResultType, finalResultType, ResourceManagerUtils.InternalRuntimeContext.getDelayDuration(this.getDefaultPollInterval()), lroInit, context ); } /** * Gets the final result, or an error, based on last async poll response. * * @param response the last async poll response. * @param <T> type of poll result. * @param <U> type of final result. * @return the final result, or an error. 
*/ public <T, U> Mono<U> getLroFinalResultOrError(AsyncPollResponse<PollResult<T>, U> response) { if (response.getStatus() != LongRunningOperationStatus.SUCCESSFULLY_COMPLETED) { String errorMessage; ManagementError managementError = null; HttpResponse errorResponse = null; PollResult.Error lroError = response.getValue().getError(); if (lroError != null) { errorResponse = new HttpResponseImpl(lroError.getResponseStatusCode(), lroError.getResponseHeaders(), lroError.getResponseBody()); errorMessage = response.getValue().getError().getMessage(); String errorBody = response.getValue().getError().getResponseBody(); if (errorBody != null) { try { managementError = this.getSerializerAdapter().deserialize( errorBody, ManagementError.class, SerializerEncoding.JSON); if (managementError.getCode() == null || managementError.getMessage() == null) { managementError = null; } } catch (IOException | RuntimeException ioe) { logger.logThrowableAsWarning(ioe); } } } else { errorMessage = "Long running operation failed."; } if (managementError == null) { managementError = new ManagementError(response.getStatus().toString(), errorMessage); } return Mono.error(new ManagementException(errorMessage, errorResponse, managementError)); } else { return response.getFinalResult(); } } private static class HttpResponseImpl extends HttpResponse { private final int statusCode; private final byte[] responseBody; private final HttpHeaders httpHeaders; HttpResponseImpl(int statusCode, HttpHeaders httpHeaders, String responseBody) { super(null); this.statusCode = statusCode; this.httpHeaders = httpHeaders; this.responseBody = responseBody == null ? 
null : responseBody.getBytes(StandardCharsets.UTF_8); } @Override public int getStatusCode() { return statusCode; } @Override public String getHeaderValue(String s) { return httpHeaders.getValue(s); } @Override public HttpHeaders getHeaders() { return httpHeaders; } @Override public Flux<ByteBuffer> getBody() { return Flux.just(ByteBuffer.wrap(responseBody)); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.just(responseBody); } @Override public Mono<String> getBodyAsString() { return Mono.just(new String(responseBody, StandardCharsets.UTF_8)); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.just(new String(responseBody, charset)); } } }
Consider parsing a date and comparing result OR validate that data provide inside the string is really a date, (i.e. I could write `@PlaybackOnly(expiryTime = "0")` to game the system).
private void validateExpiryTime(PlaybackOnly annotation) { String expiryStr = annotation.expiryTime(); if ("".equals(expiryStr)) { return; } OffsetDateTime now = OffsetDateTime.now(ZoneId.of(ZoneId.SHORT_IDS.get("PST"))); String nowStr = now.getYear() + "/" + String.format("%02d", now.getMonthValue()) + "/" + String.format("%02d", now.getDayOfMonth()); if (expiryStr.compareTo(nowStr) < 0) { throw new RuntimeException("PlaybackOnly has expired. Test must be reenabled"); } }
if (expiryStr.compareTo(nowStr) < 0) {
private void validateExpiryTime(PlaybackOnly annotation) { String expiryStr = annotation.expiryTime(); if ("".equals(expiryStr)) { return; } OffsetDateTime expiry = LocalDate.parse(expiryStr, DateTimeFormatter.ofPattern("yyyy-MM-dd")).atTime(0, 0) .atZone(ZoneId.of(ZoneId.SHORT_IDS.get("PST"))).toOffsetDateTime(); OffsetDateTime now = OffsetDateTime.now(ZoneId.of(ZoneId.SHORT_IDS.get("PST"))); if (now.isAfter(expiry)) { throw new RuntimeException("PlaybackOnly has expired. Test must be reenabled"); } }
class PlaybackOnlyExtension implements IAnnotationDrivenExtension<PlaybackOnly> { @Override public void visitFeatureAnnotation(PlaybackOnly annotation, FeatureInfo feature) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { feature.skip(String.format("Test ignored in %s mode", testMode)); } } @Override public void visitSpecAnnotation(PlaybackOnly annotation, SpecInfo spec) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { spec.skip(String.format("Test ignored in %s mode", testMode)); } } }
class PlaybackOnlyExtension implements IAnnotationDrivenExtension<PlaybackOnly> { @Override public void visitFeatureAnnotation(PlaybackOnly annotation, FeatureInfo feature) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { feature.skip(String.format("Test ignored in %s mode", testMode)); } } @Override public void visitSpecAnnotation(PlaybackOnly annotation, SpecInfo spec) { validateExpiryTime(annotation); TestMode testMode = TestEnvironment.getInstance().getTestMode(); if (testMode != TestMode.PLAYBACK) { spec.skip(String.format("Test ignored in %s mode", testMode)); } } }
Why do we need `totalLength`? Do we set the cache capacity based on the total number of chars in the cache?
int getTotalLength() { return totalLength; }
}
int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
*/ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
* TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
It looks like the decision to evict is based on the number of entries in the cache. So, how is `totalLength` used?
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
final boolean removingEntry = size() > capacity;
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
Have we perf tested the impact of locking in Event Hubs scenario where every event will do a getSchema for deserializing the event?
Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); }
synchronized (lock) {
Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. 
* * @return The length of schemas stored in cache. */ int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } } /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. 
*/ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. 
* * @return The length of schemas stored in cache. */ int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } } /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. * TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. 
*/ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
I think this is required to keep it thread-safe. My guidance around this was from the Javadocs: > Note that this implementation is not synchronized. If multiple threads access a linked hash map concurrently, and at least one of the threads modifies the map structurally, it must be synchronized externally. This is typically accomplished by synchronizing on some object that naturally encapsulates the map. If no such object exists, the map should be "wrapped" using the [Collections.synchronizedMap](https://docs.oracle.com/javase/8/docs/api/java/util/Collections.html#synchronizedMap-java.util.Map-) method. This is best done at creation time, to prevent accidental unsynchronized access to the map: If we're comparing perf before and after this implementation, there is a large win because of the constant network calls we do to the service for a schema. (I have another branch for the perf tests)
Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); }
synchronized (lock) {
Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. 
* * @return The length of schemas stored in cache. */ int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } } /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. 
*/ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. 
* * @return The length of schemas stored in cache. */ int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } } /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. * TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. 
*/ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
The feature crew wanted this as logging in the case a customer sees their app is huge and wants to know how much of it the cache is taking. So they can have a measurable number.
int getTotalLength() { return totalLength; }
}
int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
*/ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
* TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
If we evict an entry from the cache, then the entry we removed should decrement from the totalLength, since its schema is no longer in the map.
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
final boolean removingEntry = size() > capacity;
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
If the cache is taking up too much space, should we have APIs for customers to clear the cache, plugin their own or disable the cache?
int getTotalLength() { return totalLength; }
}
int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
*/ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
* TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
Yeah, I think you answered my question in the previous comment, I was looking at why totalLength was needed if the eviction policy is based on count of entries and not totalLength.
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
final boolean removingEntry = size() > capacity;
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
This is something post GA they want to get customer data for before exposing more API... (ie. Does the cache take up too much space?)
int getTotalLength() { return totalLength; }
}
int getTotalLength() { synchronized (lock) { return cache.getTotalLength(); } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
*/ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
class SchemaRegistrySchemaCache { private static final String SIZE_KEY = "size"; private static final String TOTAL_LENGTH_KEY = "totalLength"; private final ClientLogger logger = new ClientLogger(SchemaRegistrySchemaCache.class); private final SchemaCache cache; private final SchemaRegistryAsyncClient schemaRegistryClient; private final String schemaGroup; private final boolean autoRegisterSchemas; private final Object lock = new Object(); SchemaRegistrySchemaCache(SchemaRegistryAsyncClient schemaRegistryClient, String schemaGroup, boolean autoRegisterSchemas, int capacity) { this.schemaRegistryClient = schemaRegistryClient; this.schemaGroup = schemaGroup; this.autoRegisterSchemas = autoRegisterSchemas; this.cache = new SchemaCache(capacity); } Mono<String> getSchemaId(Schema schema) { final String existingSchemaId; synchronized (lock) { existingSchemaId = cache.getSchemaId(schema); } if (existingSchemaId != null) { return Mono.just(existingSchemaId); } final String schemaFullName = schema.getFullName(); final String schemaString = schema.toString(); if (CoreUtils.isNullOrEmpty(schemaGroup)) { return monoError(logger, new IllegalStateException("Cannot serialize when 'schemaGroup' is not set. 
Please" + "set in SchemaRegistryApacheAvroSerializer.schemaGroup when creating serializer.")); } final Mono<SchemaProperties> serviceCall; if (autoRegisterSchemas) { serviceCall = this.schemaRegistryClient .registerSchema(schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } else { serviceCall = this.schemaRegistryClient.getSchemaProperties( schemaGroup, schemaFullName, schemaString, SchemaFormat.AVRO); } return serviceCall.map(properties -> { final String schemaId = properties.getId(); synchronized (lock) { cache.put(schemaId, schema); logCacheStatus(); } return schemaId; }); } Mono<Schema> getSchema(String schemaId) { synchronized (lock) { final Schema existing = cache.get(schemaId); if (existing != null) { return Mono.just(existing); } } return schemaRegistryClient.getSchema(schemaId) .handle((registryObject, sink) -> { final String schemaString = registryObject.getDefinition(); final Schema parsedSchema; synchronized (lock) { parsedSchema = new Schema.Parser().parse(schemaString); cache.put(schemaId, parsedSchema); logCacheStatus(); } sink.next(parsedSchema); }); } /** * Gets number of cached schemas. * * @return Number of cached schemas. */ int getSize() { synchronized (lock) { return cache.size(); } } /** * Gets the length of schemas stored in cache. * * @return The length of schemas stored in cache. */ /** * Logs the cache status if log level verbose is enabled. Otherwise, no-op. */ private void logCacheStatus() { if (!logger.canLogAtLevel(LogLevel.VERBOSE)) { return; } final int size = cache.size(); final int length = cache.getTotalLength(); logger.atVerbose() .addKeyValue(SIZE_KEY, size) .addKeyValue(TOTAL_LENGTH_KEY, length) .log("Cache entry added or updated. Total number of entries: {}; Total schema length: {}", size, length); } /** * Simple LRU cache. Accesses to cache are synchronized via the outer class lock. 
* TODO (conniey): When https: */ private static final class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } /** * Gets the schema id of a matching schema. * * @param schema Schema to get entry for. * @return The schema id or null if it does not exist in the cache. */ String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; } } }
Oh yeah... They wanted the totalLength to estimate "how big is this cache?" since a Schema can be fairly large and may be a single entry in the cache.
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
final boolean removingEntry = size() > capacity;
protected boolean removeEldestEntry(Map.Entry<String, Schema> eldest) { final boolean removingEntry = size() > capacity; if (removingEntry) { final Schema value = eldest.getValue(); totalLength = totalLength - value.toString().length(); schemaToIdCache.remove(value); } return removingEntry; }
class SchemaCache extends LinkedHashMap<String, Schema> { private static final long serialVersionUID = -1L; private final int capacity; private final HashMap<Schema, String> schemaToIdCache = new HashMap<>(); private int totalLength; /** * Creates an LRU cache with maximum capacity. * * @param capacity Max size (number of entries) of the cache. */ SchemaCache(int capacity) { super(64, 0.75f, true); this.capacity = capacity; } int getTotalLength() { return totalLength; } String getSchemaId(Schema schema) { final String schemaId = schemaToIdCache.get(schema); if (schemaId != null) { super.get(schemaId); } return schemaId; } /** * Adds a schema keyed by its schema id. If it is larger than its counterpart. */ @Override public Schema put(String schemaId, Schema value) { final Schema existing = super.put(schemaId, value); final int currentLength = value.toString().length(); if (existing == null) { totalLength = totalLength + currentLength; } else { final int difference = currentLength - existing.toString().length(); totalLength = totalLength - difference; } schemaToIdCache.put(value, schemaId); return existing; } @Override }
// LRU cache mapping schema id -> Schema; a reverse map supports id lookup by schema,
// and totalLength tracks the summed string lengths of all cached schemas.
class SchemaCache extends LinkedHashMap<String, Schema> {
    private static final long serialVersionUID = -1L;

    // Maximum number of cached entries before the eldest is evicted.
    private final int capacity;
    // Reverse index: Schema -> schema id.
    private final HashMap<Schema, String> schemaToIdCache = new HashMap<>();
    // Running sum of the string lengths of cached schemas.
    private int totalLength;

    /**
     * Creates an LRU cache with maximum capacity.
     *
     * @param capacity Max size (number of entries) of the cache.
     */
    SchemaCache(int capacity) {
        // accessOrder=true: entries are re-ordered on access so the head is least-recently used.
        super(64, 0.75f, true);
        this.capacity = capacity;
    }

    // Returns the combined string length of all cached schemas.
    int getTotalLength() {
        return totalLength;
    }

    /**
     * Gets the schema id of a matching schema.
     *
     * @param schema Schema to get entry for.
     * @return The schema id or null if it does not exist in the cache.
     */
    String getSchemaId(Schema schema) {
        final String schemaId = schemaToIdCache.get(schema);
        if (schemaId != null) {
            // Touch the LRU entry so the schema is marked most-recently used.
            super.get(schemaId);
        }
        return schemaId;
    }

    /**
     * Adds a schema keyed by its schema id.
     */
    @Override
    public Schema put(String schemaId, Schema value) {
        final Schema existing = super.put(schemaId, value);
        final int currentLength = value.toString().length();
        if (existing == null) {
            totalLength = totalLength + currentLength;
        } else {
            // NOTE(review): difference = new - old, so subtracting it shrinks the total when the
            // replacement schema is larger — should this be "totalLength + difference"? Confirm.
            final int difference = currentLength - existing.toString().length();
            totalLength = totalLength - difference;
        }
        // NOTE(review): the replaced schema's reverse-map entry is never removed here — verify.
        schemaToIdCache.put(value, schemaId);
        return existing;
    }

    // removeEldestEntry override elided in this extract.
    @Override
}
Does Netty provide a constant we can use here instead of a string literal?
/**
 * Writes the request body to the outbound channel by streaming it from an
 * {@code InputStream} in chunks rather than buffering it fully in memory.
 *
 * @param reactorNettyOutbound the reactor-netty outbound to write to
 * @param bodyContent the input-stream-backed request body
 * @return the outbound with the chunked body send configured
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        () -> bodyContent.toStream(),
        (connection, stream) -> {
            // ChunkedStream requires a ChunkedWriteHandler in the pipeline; install one if absent.
            boolean chunkedWriterInstalled =
                connection.channel().pipeline().get(ChunkedWriteHandler.class) != null;
            if (!chunkedWriterInstalled) {
                connection.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler());
            }
            return new ChunkedStream(stream);
        },
        stream -> {
            // Intentionally empty: the stream's owner is responsible for closing it.
        });
}
c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler());
/**
 * Streams the request body from an {@code InputStream} using Netty's chunked-write
 * support instead of materializing it in memory.
 *
 * @param reactorNettyOutbound the reactor-netty outbound to write to
 * @param bodyContent the input-stream-backed request body
 * @return the outbound with the chunked body send configured
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        () -> bodyContent.toStream(),
        (connection, stream) -> {
            // Chunked writes need a ChunkedWriteHandler; add one only if the pipeline lacks it.
            boolean chunkedWriterInstalled =
                connection.channel().pipeline().get(ChunkedWriteHandler.class) != null;
            if (!chunkedWriterInstalled) {
                connection.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
            }
            return new ChunkedStream(stream);
        },
        stream -> {
            // Intentionally empty: the stream's owner is responsible for closing it.
        });
}
// HttpClient implementation backed by reactor-netty. Timeout handlers are swapped in and
// out of the channel pipeline as the request/response lifecycle progresses.
class NettyAsyncHttpClient implements HttpClient {
    // Context keys callers use to tune per-request behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     * @param readTimeout read timeout in milliseconds
     * @param writeTimeout write timeout in milliseconds
     * @param responseTimeout response timeout in milliseconds
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // A per-request response timeout supplied through the context overrides the client-wide default.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap proxy connect failures hidden inside SSL exceptions so the retry below can match them.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present: replace the first value, then append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Dispatch on the concrete BinaryDataContent type to pick the cheapest send path.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    FileContent fileContent = (FileContent) bodyContent;
                    return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength());
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
// HttpClient implementation backed by reactor-netty. This variant chunks file bodies on
// HTTPS connections and logs errors through ClientLogger.
class NettyAsyncHttpClient implements HttpClient {
    private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class);

    // Context keys callers use to tune per-request behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     * @param readTimeout read timeout in milliseconds
     * @param writeTimeout write timeout in milliseconds
     * @param responseTimeout response timeout in milliseconds
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // A per-request response timeout supplied through the context overrides the client-wide default.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap proxy connect failures hidden inside SSL exceptions so the retry below can match them.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present: replace the first value, then append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Dispatch on the concrete BinaryDataContent type to pick the cheapest send path.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent);
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    // Sends a file body. NOTE(review): the HTTPS branch streams the file through a
    // ChunkedWriteHandler while plain HTTP uses sendFile — presumably because the
    // zero-copy transfer path is not usable over TLS; confirm.
    private static NettyOutbound sendFile(
        HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) {
        if (restRequest.getUrl().getProtocol().equals("https")) {
            return reactorNettyOutbound.sendUsing(
                () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ),
                (c, fc) -> {
                    // Chunked writes need a ChunkedWriteHandler; add one only if the pipeline lacks it.
                    if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                        c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
                    }
                    try {
                        return new ChunkedNioFile(
                            fc, 0, fileContent.getLength(), fileContent.getChunkSize());
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                },
                (fc) -> {
                    // Cleanup callback: close the channel opened by the supplier above.
                    try {
                        fc.close();
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                });
        } else {
            return reactorNettyOutbound.sendFile(
                fileContent.getFile(), 0, fileContent.getLength());
        }
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
What should be our guidance on when this stream can be safely closed by the user?
/**
 * Configures the outbound to stream the request body from an {@code InputStream}
 * using chunked writes, avoiding full in-memory buffering.
 *
 * @param reactorNettyOutbound the reactor-netty outbound to write to
 * @param bodyContent the input-stream-backed request body
 * @return the outbound with the chunked body send configured
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        () -> bodyContent.toStream(),
        (connection, inputStream) -> {
            // ChunkedStream only works when a ChunkedWriteHandler is present in the pipeline.
            if (connection.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                connection.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler());
            }
            return new ChunkedStream(inputStream);
        },
        inputStream -> {
            // No-op cleanup: closing the stream is left to its owner.
        });
}
/**
 * Configures the outbound to stream the request body from an {@code InputStream}
 * using Netty's chunked-write support, avoiding full in-memory buffering.
 *
 * @param reactorNettyOutbound the reactor-netty outbound to write to
 * @param bodyContent the input-stream-backed request body
 * @return the outbound with the chunked body send configured
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        () -> bodyContent.toStream(),
        (connection, inputStream) -> {
            // ChunkedStream only works when a ChunkedWriteHandler is present in the pipeline.
            if (connection.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                connection.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
            }
            return new ChunkedStream(inputStream);
        },
        inputStream -> {
            // No-op cleanup: closing the stream is left to its owner.
        });
}
// HttpClient implementation backed by reactor-netty. Timeout handlers are swapped in and
// out of the channel pipeline as the request/response lifecycle progresses.
class NettyAsyncHttpClient implements HttpClient {
    // Context keys callers use to tune per-request behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     * @param readTimeout read timeout in milliseconds
     * @param writeTimeout write timeout in milliseconds
     * @param responseTimeout response timeout in milliseconds
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // A per-request response timeout supplied through the context overrides the client-wide default.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap proxy connect failures hidden inside SSL exceptions so the retry below can match them.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present: replace the first value, then append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Dispatch on the concrete BinaryDataContent type to pick the cheapest send path.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    FileContent fileContent = (FileContent) bodyContent;
                    return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength());
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
// HttpClient implementation backed by reactor-netty. This variant chunks file bodies on
// HTTPS connections and logs errors through ClientLogger.
class NettyAsyncHttpClient implements HttpClient {
    private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class);

    // Context keys callers use to tune per-request behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     * @param readTimeout read timeout in milliseconds
     * @param writeTimeout write timeout in milliseconds
     * @param responseTimeout response timeout in milliseconds
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // A per-request response timeout supplied through the context overrides the client-wide default.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap proxy connect failures hidden inside SSL exceptions so the retry below can match them.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present: replace the first value, then append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Dispatch on the concrete BinaryDataContent type to pick the cheapest send path.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent);
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    // Sends a file body. NOTE(review): the HTTPS branch streams the file through a
    // ChunkedWriteHandler while plain HTTP uses sendFile — presumably because the
    // zero-copy transfer path is not usable over TLS; confirm.
    private static NettyOutbound sendFile(
        HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) {
        if (restRequest.getUrl().getProtocol().equals("https")) {
            return reactorNettyOutbound.sendUsing(
                () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ),
                (c, fc) -> {
                    // Chunked writes need a ChunkedWriteHandler; add one only if the pipeline lacks it.
                    if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                        c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
                    }
                    try {
                        return new ChunkedNioFile(
                            fc, 0, fileContent.getLength(), fileContent.getChunkSize());
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                },
                (fc) -> {
                    // Cleanup callback: close the channel opened by the supplier above.
                    try {
                        fc.close();
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                });
        } else {
            return reactorNettyOutbound.sendFile(
                fileContent.getFile(), 0, fileContent.getLength());
        }
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
What happens if the request is retried and the `InputStream` is not replayable?
/*
 * Writes the request body contained in an InputStream to the Reactor Netty outbound as a chunked stream,
 * avoiding buffering the whole body in memory.
 *
 * A ChunkedWriteHandler is lazily added to the channel pipeline (at most once per channel) so the stream
 * can be written in chunks.
 *
 * NOTE(review): the stream supplier is invoked per subscription; if the InputStream isn't replayable, a
 * retry will re-read an already-consumed stream — confirm upstream guarantees replayability.
 *
 * @param reactorNettyOutbound the outbound to write the body to
 * @param bodyContent the InputStream-backed request body
 * @return the outbound with the chunked body send attached
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        bodyContent::toStream,
        (c, stream) -> {
            if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                // Use Reactor Netty's canonical handler-name constant instead of a hand-written string
                // literal, so the handler name stays in sync with the library's pipeline conventions.
                c.addHandlerLast(reactor.netty.NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
            }
            return new ChunkedStream(stream);
        },
        (stream) -> {
            // Intentionally empty: the caller owns the InputStream's lifecycle.
        });
}
bodyContent::toStream,
/*
 * Streams the request body from an InputStream to the Reactor Netty outbound without buffering it fully
 * in memory. Installs a ChunkedWriteHandler on the channel pipeline (at most once per channel) under
 * Reactor Netty's canonical handler name, then wraps the stream as a ChunkedStream for chunked writing.
 * The cleanup callback is deliberately a no-op: the stream's lifecycle belongs to the caller.
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        () -> bodyContent.toStream(),
        (connection, inputStream) -> {
            boolean chunkedWriterMissing =
                connection.channel().pipeline().get(ChunkedWriteHandler.class) == null;
            if (chunkedWriterMissing) {
                connection.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
            }
            return new ChunkedStream(inputStream);
        },
        inputStream -> {
            // Intentionally empty: stream ownership remains with the caller.
        });
}
// Async HttpClient implementation backed by Reactor Netty. Translates azure-core HttpRequest/HttpResponse
// to and from Reactor Netty types, and wires per-request write/response/read timeout handlers into the
// channel pipeline at the appropriate request lifecycle stages.
class NettyAsyncHttpClient implements HttpClient {
    // Per-request Context keys callers may set to override default behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    // Timeouts in milliseconds, applied via pipeline handlers (see add*TimeoutHandler below).
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // Per-request response timeout override (a Duration in the Context); falls back to the
        // client-wide default when absent or of the wrong type.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        // Timeout handlers rotate through the pipeline as the request progresses:
        // write timeout while sending, response timeout while waiting, read timeout while receiving.
        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap a ProxyConnectException hidden inside an SSLException so the retry below can see it.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present on the Netty request: replace the first value, append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Choose the most efficient send path for the concrete body content type.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization to bytes until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    FileContent fileContent = (FileContent) bodyContent;
                    // NOTE(review): zero-copy sendFile — confirm behavior over TLS, where zero-copy
                    // file transfer does not apply.
                    return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength());
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    // Generic fallback: stream the body as a Flux of ByteBufs.
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
// Async HttpClient implementation backed by Reactor Netty. Translates azure-core HttpRequest/HttpResponse
// to and from Reactor Netty types, wires per-request write/response/read timeout handlers into the channel
// pipeline, and chooses the most efficient send path for each request body type (including a chunked,
// TLS-compatible path for file bodies over https).
class NettyAsyncHttpClient implements HttpClient {
    private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class);

    // Per-request Context keys callers may set to override default behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    // Timeouts in milliseconds, applied via pipeline handlers (see add*TimeoutHandler below).
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // Per-request response timeout override (a Duration in the Context); falls back to the
        // client-wide default when absent or of the wrong type.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        // Timeout handlers rotate through the pipeline as the request progresses:
        // write timeout while sending, response timeout while waiting, read timeout while receiving.
        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap a ProxyConnectException hidden inside an SSLException so the retry below can see it.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present on the Netty request: replace the first value, append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Choose the most efficient send path for the concrete body content type.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization to bytes until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent);
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    // Generic fallback: stream the body as a Flux of ByteBufs.
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    /*
     * Sends a file-backed request body. Over https the file is written as a chunked NIO file through a
     * ChunkedWriteHandler (zero-copy transfer does not apply to TLS connections); over plain http the
     * zero-copy sendFile path is used.
     */
    private static NettyOutbound sendFile(
        HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) {
        if (restRequest.getUrl().getProtocol().equals("https")) {
            return reactorNettyOutbound.sendUsing(
                () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ),
                (c, fc) -> {
                    // Install the chunked writer at most once per channel, under the canonical name.
                    if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                        c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
                    }
                    try {
                        return new ChunkedNioFile(
                            fc, 0, fileContent.getLength(), fileContent.getChunkSize());
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                },
                (fc) -> {
                    // Cleanup callback: the FileChannel was opened by the supplier above, so close it here.
                    try {
                        fc.close();
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                });
        } else {
            return reactorNettyOutbound.sendFile(
                fileContent.getFile(), 0, fileContent.getLength());
        }
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
Yeah, there's one. I'll give it a shot.
/*
 * Writes the request body contained in an InputStream to the Reactor Netty outbound as a chunked stream,
 * avoiding buffering the whole body in memory.
 *
 * A ChunkedWriteHandler is lazily added to the channel pipeline (at most once per channel) so the stream
 * can be written in chunks.
 *
 * NOTE(review): the stream supplier is invoked per subscription; if the InputStream isn't replayable, a
 * retry will re-read an already-consumed stream — confirm upstream guarantees replayability.
 *
 * @param reactorNettyOutbound the outbound to write the body to
 * @param bodyContent the InputStream-backed request body
 * @return the outbound with the chunked body send attached
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        bodyContent::toStream,
        (c, stream) -> {
            if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                // Use Reactor Netty's canonical handler-name constant instead of a hand-written string
                // literal, so the handler name stays in sync with the library's pipeline conventions.
                c.addHandlerLast(reactor.netty.NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
            }
            return new ChunkedStream(stream);
        },
        (stream) -> {
            // Intentionally empty: the caller owns the InputStream's lifecycle.
        });
}
c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler());
/*
 * Streams the request body from an InputStream to the Reactor Netty outbound without buffering it fully
 * in memory. Installs a ChunkedWriteHandler on the channel pipeline (at most once per channel) under
 * Reactor Netty's canonical handler name, then wraps the stream as a ChunkedStream for chunked writing.
 * The cleanup callback is deliberately a no-op: the stream's lifecycle belongs to the caller.
 */
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) {
    return reactorNettyOutbound.sendUsing(
        () -> bodyContent.toStream(),
        (connection, inputStream) -> {
            boolean chunkedWriterMissing =
                connection.channel().pipeline().get(ChunkedWriteHandler.class) == null;
            if (chunkedWriterMissing) {
                connection.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
            }
            return new ChunkedStream(inputStream);
        },
        inputStream -> {
            // Intentionally empty: stream ownership remains with the caller.
        });
}
// Async HttpClient implementation backed by Reactor Netty. Translates azure-core HttpRequest/HttpResponse
// to and from Reactor Netty types, and wires per-request write/response/read timeout handlers into the
// channel pipeline at the appropriate request lifecycle stages.
class NettyAsyncHttpClient implements HttpClient {
    // Per-request Context keys callers may set to override default behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    // Timeouts in milliseconds, applied via pipeline handlers (see add*TimeoutHandler below).
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // Per-request response timeout override (a Duration in the Context); falls back to the
        // client-wide default when absent or of the wrong type.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        // Timeout handlers rotate through the pipeline as the request progresses:
        // write timeout while sending, response timeout while waiting, read timeout while receiving.
        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap a ProxyConnectException hidden inside an SSLException so the retry below can see it.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present on the Netty request: replace the first value, append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Choose the most efficient send path for the concrete body content type.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization to bytes until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    FileContent fileContent = (FileContent) bodyContent;
                    // NOTE(review): zero-copy sendFile — confirm behavior over TLS, where zero-copy
                    // file transfer does not apply.
                    return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength());
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    // Generic fallback: stream the body as a Flux of ByteBufs.
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
// Async HttpClient implementation backed by Reactor Netty. Translates azure-core HttpRequest/HttpResponse
// to and from Reactor Netty types, wires per-request write/response/read timeout handlers into the channel
// pipeline, and chooses the most efficient send path for each request body type (including a chunked,
// TLS-compatible path for file bodies over https).
class NettyAsyncHttpClient implements HttpClient {
    private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class);

    // Per-request Context keys callers may set to override default behavior.
    private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response";
    private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout";

    final boolean disableBufferCopy;
    // Timeouts in milliseconds, applied via pipeline handlers (see add*TimeoutHandler below).
    final long readTimeout;
    final long writeTimeout;
    final long responseTimeout;
    final reactor.netty.http.client.HttpClient nettyClient;

    /**
     * Creates NettyAsyncHttpClient with provided http client.
     *
     * @param nettyClient the reactor-netty http client
     * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled.
     */
    NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy,
        long readTimeout, long writeTimeout, long responseTimeout) {
        this.nettyClient = nettyClient;
        this.disableBufferCopy = disableBufferCopy;
        this.readTimeout = readTimeout;
        this.writeTimeout = writeTimeout;
        this.responseTimeout = responseTimeout;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Mono<HttpResponse> send(HttpRequest request) {
        return send(request, Context.NONE);
    }

    @Override
    public Mono<HttpResponse> send(HttpRequest request, Context context) {
        Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null.");
        Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null.");
        Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null.");

        boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false);
        // Per-request response timeout override (a Duration in the Context); falls back to the
        // client-wide default when absent or of the wrong type.
        long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT)
            .filter(timeoutDuration -> timeoutDuration instanceof Duration)
            .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis())
            .orElse(this.responseTimeout);

        // Timeout handlers rotate through the pipeline as the request progresses:
        // write timeout while sending, response timeout while waiting, read timeout while receiving.
        return nettyClient
            .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout))
            .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout))
            .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout))
            .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection))
            .request(HttpMethod.valueOf(request.getHttpMethod().toString()))
            .uri(request.getUrl().toString())
            .send(bodySendDelegate(request))
            .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse))
            .single()
            .onErrorMap(throwable -> {
                // Unwrap a ProxyConnectException hidden inside an SSLException so the retry below can see it.
                if (throwable instanceof SSLException) {
                    if (throwable.getCause() instanceof ProxyConnectException) {
                        return throwable.getCause();
                    }
                }
                return throwable;
            })
            .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException)
                .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure()));
    }

    /**
     * Delegate to send the request content.
     *
     * @param restRequest the Rest request contains the body to be sent
     * @return a delegate upon invocation sets the request body in reactor-netty outbound object
     */
    private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate(
        final HttpRequest restRequest) {
        return (reactorNettyRequest, reactorNettyOutbound) -> {
            for (HttpHeader hdr : restRequest.getHeaders()) {
                if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) {
                    // Header already present on the Netty request: replace the first value, append the rest.
                    boolean first = true;
                    for (String value : hdr.getValuesList()) {
                        if (first) {
                            first = false;
                            reactorNettyRequest.header(hdr.getName(), value);
                        } else {
                            reactorNettyRequest.addHeader(hdr.getName(), value);
                        }
                    }
                } else {
                    hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value));
                }
            }
            BinaryData body = restRequest.getBodyAsBinaryData();
            if (body != null) {
                // Choose the most efficient send path for the concrete body content type.
                BinaryDataContent bodyContent = BinaryDataHelper.getContent(body);
                if (bodyContent instanceof ByteArrayContent) {
                    return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) {
                    // Defer serialization to bytes until subscription time.
                    return reactorNettyOutbound.send(
                        Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes())));
                } else if (bodyContent instanceof FileContent) {
                    return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent);
                } else if (bodyContent instanceof InputStreamContent) {
                    return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent);
                } else {
                    // Generic fallback: stream the body as a Flux of ByteBufs.
                    Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer);
                    return reactorNettyOutbound.send(nettyByteBufFlux);
                }
            } else {
                return reactorNettyOutbound;
            }
        };
    }

    /*
     * Sends a file-backed request body. Over https the file is written as a chunked NIO file through a
     * ChunkedWriteHandler (zero-copy transfer does not apply to TLS connections); over plain http the
     * zero-copy sendFile path is used.
     */
    private static NettyOutbound sendFile(
        HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) {
        if (restRequest.getUrl().getProtocol().equals("https")) {
            return reactorNettyOutbound.sendUsing(
                () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ),
                (c, fc) -> {
                    // Install the chunked writer at most once per channel, under the canonical name.
                    if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) {
                        c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler());
                    }
                    try {
                        return new ChunkedNioFile(
                            fc, 0, fileContent.getLength(), fileContent.getChunkSize());
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                },
                (fc) -> {
                    // Cleanup callback: the FileChannel was opened by the supplier above, so close it here.
                    try {
                        fc.close();
                    } catch (IOException e) {
                        throw LOGGER.logExceptionAsError(new UncheckedIOException(e));
                    }
                });
        } else {
            return reactorNettyOutbound.sendFile(
                fileContent.getFile(), 0, fileContent.getLength());
        }
    }

    /**
     * Delegate to receive response.
     *
     * @param restRequest the Rest request whose response this delegate handles
     * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered.
     * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory.
     * @return a delegate upon invocation setup Rest response object
     */
    private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate(
        final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) {
        return (reactorNettyResponse, reactorNettyConnection) -> {
            /*
             * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the
             * response MUST be deeply copied to ensure it can safely be used downstream.
             */
            if (eagerlyReadResponse) {
                return FluxUtil.collectBytesFromNetworkResponse(
                    reactorNettyConnection.inbound().receive().asByteBuffer(),
                    new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders()))
                    .doFinally(ignored -> closeConnection(reactorNettyConnection))
                    .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes));
            } else {
                return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection,
                    restRequest, disableBufferCopy));
            }
        };
    }

    /*
     * Adds write timeout handler once the request is ready to begin sending.
     */
    private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove write timeout handler from the connection as the request has finished sending, then add response timeout
     * handler.
     */
    private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove response timeout handler from the connection as the response has been received, then add read timeout
     * handler.
     */
    private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) {
        connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME)
            .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis));
    }

    /*
     * Remove read timeout handler as the complete response has been received.
     */
    private static void removeReadTimeoutHandler(Connection connection) {
        connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME);
    }
}
whenever HttpClient.send emits a response. By that time the body has been consumed. Or, in the case of a failure, they get the opportunity to rewind the stream and re-attempt the request.
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
class NettyAsyncHttpClient implements HttpClient { private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, 
connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> 
Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { FileContent fileContent = (FileContent) bodyContent; return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength()); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. 
*/ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. */ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
class NettyAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class); private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> 
addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof 
SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } private static NettyOutbound sendFile( HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) { if (restRequest.getUrl().getProtocol().equals("https")) { return reactorNettyOutbound.sendUsing( () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ), (c, fc) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } try { return new ChunkedNioFile( fc, 0, fileContent.getLength(), fileContent.getChunkSize()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }, (fc) -> { try { fc.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }); } else { return reactorNettyOutbound.sendFile( fileContent.getFile(), 0, fileContent.getLength()); } } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. 
* @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. */ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. 
*/ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
It's going to be caller responsibility to rewind body before next attempt. Reactor-Netty doesn't attempt to retransmit if headers or body were already in flight.
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast("reactor.left.chunkedWriter", new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
bodyContent::toStream,
private static NettyOutbound sendInputStream(NettyOutbound reactorNettyOutbound, InputStreamContent bodyContent) { return reactorNettyOutbound.sendUsing( bodyContent::toStream, (c, stream) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } return new ChunkedStream(stream); }, (stream) -> { }); }
class NettyAsyncHttpClient implements HttpClient { private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, 
connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> 
Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { FileContent fileContent = (FileContent) bodyContent; return reactorNettyOutbound.sendFile(fileContent.getFile(), 0, fileContent.getLength()); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. * @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. 
*/ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. */ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
class NettyAsyncHttpClient implements HttpClient { private static final ClientLogger LOGGER = new ClientLogger(NettyAsyncHttpClient.class); private static final String AZURE_EAGERLY_READ_RESPONSE = "azure-eagerly-read-response"; private static final String AZURE_RESPONSE_TIMEOUT = "azure-response-timeout"; final boolean disableBufferCopy; final long readTimeout; final long writeTimeout; final long responseTimeout; final reactor.netty.http.client.HttpClient nettyClient; /** * Creates NettyAsyncHttpClient with provided http client. * * @param nettyClient the reactor-netty http client * @param disableBufferCopy Determines whether deep cloning of response buffers should be disabled. */ NettyAsyncHttpClient(reactor.netty.http.client.HttpClient nettyClient, boolean disableBufferCopy, long readTimeout, long writeTimeout, long responseTimeout) { this.nettyClient = nettyClient; this.disableBufferCopy = disableBufferCopy; this.readTimeout = readTimeout; this.writeTimeout = writeTimeout; this.responseTimeout = responseTimeout; } /** * {@inheritDoc} */ @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { Objects.requireNonNull(request.getHttpMethod(), "'request.getHttpMethod()' cannot be null."); Objects.requireNonNull(request.getUrl(), "'request.getUrl()' cannot be null."); Objects.requireNonNull(request.getUrl().getProtocol(), "'request.getUrl().getProtocol()' cannot be null."); boolean effectiveEagerlyReadResponse = (boolean) context.getData(AZURE_EAGERLY_READ_RESPONSE).orElse(false); long effectiveResponseTimeout = context.getData(AZURE_RESPONSE_TIMEOUT) .filter(timeoutDuration -> timeoutDuration instanceof Duration) .map(timeoutDuration -> ((Duration) timeoutDuration).toMillis()) .orElse(this.responseTimeout); return nettyClient .doOnRequest((r, connection) -> addWriteTimeoutHandler(connection, writeTimeout)) .doAfterRequest((r, connection) -> 
addResponseTimeoutHandler(connection, effectiveResponseTimeout)) .doOnResponse((response, connection) -> addReadTimeoutHandler(connection, readTimeout)) .doAfterResponseSuccess((response, connection) -> removeReadTimeoutHandler(connection)) .request(HttpMethod.valueOf(request.getHttpMethod().toString())) .uri(request.getUrl().toString()) .send(bodySendDelegate(request)) .responseConnection(responseDelegate(request, disableBufferCopy, effectiveEagerlyReadResponse)) .single() .onErrorMap(throwable -> { if (throwable instanceof SSLException) { if (throwable.getCause() instanceof ProxyConnectException) { return throwable.getCause(); } } return throwable; }) .retryWhen(Retry.max(1).filter(throwable -> throwable instanceof ProxyConnectException) .onRetryExhaustedThrow((ignoredSpec, signal) -> signal.failure())); } /** * Delegate to send the request content. * * @param restRequest the Rest request contains the body to be sent * @return a delegate upon invocation sets the request body in reactor-netty outbound object */ private static BiFunction<HttpClientRequest, NettyOutbound, Publisher<Void>> bodySendDelegate( final HttpRequest restRequest) { return (reactorNettyRequest, reactorNettyOutbound) -> { for (HttpHeader hdr : restRequest.getHeaders()) { if (reactorNettyRequest.requestHeaders().contains(hdr.getName())) { boolean first = true; for (String value : hdr.getValuesList()) { if (first) { first = false; reactorNettyRequest.header(hdr.getName(), value); } else { reactorNettyRequest.addHeader(hdr.getName(), value); } } } else { hdr.getValuesList().forEach(value -> reactorNettyRequest.addHeader(hdr.getName(), value)); } } BinaryData body = restRequest.getBodyAsBinaryData(); if (body != null) { BinaryDataContent bodyContent = BinaryDataHelper.getContent(body); if (bodyContent instanceof ByteArrayContent) { return reactorNettyOutbound.send(Mono.just(Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof StringContent || bodyContent instanceof 
SerializableContent) { return reactorNettyOutbound.send( Mono.fromSupplier(() -> Unpooled.wrappedBuffer(bodyContent.toBytes()))); } else if (bodyContent instanceof FileContent) { return sendFile(restRequest, reactorNettyOutbound, (FileContent) bodyContent); } else if (bodyContent instanceof InputStreamContent) { return sendInputStream(reactorNettyOutbound, (InputStreamContent) bodyContent); } else { Flux<ByteBuf> nettyByteBufFlux = restRequest.getBody().map(Unpooled::wrappedBuffer); return reactorNettyOutbound.send(nettyByteBufFlux); } } else { return reactorNettyOutbound; } }; } private static NettyOutbound sendFile( HttpRequest restRequest, NettyOutbound reactorNettyOutbound, FileContent fileContent) { if (restRequest.getUrl().getProtocol().equals("https")) { return reactorNettyOutbound.sendUsing( () -> FileChannel.open(fileContent.getFile(), StandardOpenOption.READ), (c, fc) -> { if (c.channel().pipeline().get(ChunkedWriteHandler.class) == null) { c.addHandlerLast(NettyPipeline.ChunkedWriter, new ChunkedWriteHandler()); } try { return new ChunkedNioFile( fc, 0, fileContent.getLength(), fileContent.getChunkSize()); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }, (fc) -> { try { fc.close(); } catch (IOException e) { throw LOGGER.logExceptionAsError(new UncheckedIOException(e)); } }); } else { return reactorNettyOutbound.sendFile( fileContent.getFile(), 0, fileContent.getLength()); } } /** * Delegate to receive response. * * @param restRequest the Rest request whose response this delegate handles * @param disableBufferCopy Flag indicating if the network response shouldn't be buffered. * @param eagerlyReadResponse Flag indicating if the network response should be eagerly read into memory. 
* @return a delegate upon invocation setup Rest response object */ private static BiFunction<HttpClientResponse, Connection, Publisher<HttpResponse>> responseDelegate( final HttpRequest restRequest, final boolean disableBufferCopy, final boolean eagerlyReadResponse) { return (reactorNettyResponse, reactorNettyConnection) -> { /* * If the response is being eagerly read into memory the flag for buffer copying can be ignored as the * response MUST be deeply copied to ensure it can safely be used downstream. */ if (eagerlyReadResponse) { return FluxUtil.collectBytesFromNetworkResponse( reactorNettyConnection.inbound().receive().asByteBuffer(), new NettyToAzureCoreHttpHeadersWrapper(reactorNettyResponse.responseHeaders())) .doFinally(ignored -> closeConnection(reactorNettyConnection)) .map(bytes -> new NettyAsyncHttpBufferedResponse(reactorNettyResponse, restRequest, bytes)); } else { return Mono.just(new NettyAsyncHttpResponse(reactorNettyResponse, reactorNettyConnection, restRequest, disableBufferCopy)); } }; } /* * Adds write timeout handler once the request is ready to begin sending. */ private static void addWriteTimeoutHandler(Connection connection, long timeoutMillis) { connection.addHandlerLast(WriteTimeoutHandler.HANDLER_NAME, new WriteTimeoutHandler(timeoutMillis)); } /* * Remove write timeout handler from the connection as the request has finished sending, then add response timeout * handler. */ private static void addResponseTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(WriteTimeoutHandler.HANDLER_NAME) .addHandlerLast(ResponseTimeoutHandler.HANDLER_NAME, new ResponseTimeoutHandler(timeoutMillis)); } /* * Remove response timeout handler from the connection as the response has been received, then add read timeout * handler. 
*/ private static void addReadTimeoutHandler(Connection connection, long timeoutMillis) { connection.removeHandler(ResponseTimeoutHandler.HANDLER_NAME) .addHandlerLast(ReadTimeoutHandler.HANDLER_NAME, new ReadTimeoutHandler(timeoutMillis)); } /* * Remove read timeout handler as the complete response has been received. */ private static void removeReadTimeoutHandler(Connection connection) { connection.removeHandler(ReadTimeoutHandler.HANDLER_NAME); } }
```suggestion // If the x-ms-date exists ignore the Date header. ```
private static String buildStringToSign(URL requestUrl, HttpHeaders headers, AzureNamedKeyCredential credential) { String dateHeader = headers.get("x-ms-date") != null ? "" : getStandardHeaderValue(headers, "Date"); return String.join("\n", dateHeader, getCanonicalizedResource(requestUrl, credential)); }
private static String buildStringToSign(URL requestUrl, HttpHeaders headers, AzureNamedKeyCredential credential) { String dateHeader = headers.get("x-ms-date") != null ? "" : getStandardHeaderValue(headers, "Date"); return String.join("\n", dateHeader, getCanonicalizedResource(requestUrl, credential)); }
class TableAzureNamedKeyCredentialPolicy implements HttpPipelinePolicy { private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKeyLite %s:%s"; private final AzureNamedKeyCredential credential; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential The SharedKey credential used to create the policy. */ public TableAzureNamedKeyCredentialPolicy(AzureNamedKeyCredential credential) { this.credential = credential; } /** * Authorizes a {@link com.azure.core.http.HttpRequest} with the SharedKey credential. * * @param context The context of the request. * @param next The next policy in the pipeline. * * @return A reactive result containing the HTTP response. */ public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String authorizationValue = generateAuthorizationHeader(context.getHttpRequest().getUrl(), context.getHttpRequest().getHeaders()); context.getHttpRequest().setHeader("Authorization", authorizationValue); return next.process(); } /** * Generates the Auth Headers * * @param requestUrl The URL which the request is going to. * @param headers The headers of the request. * * @return The auth header */ String generateAuthorizationHeader(URL requestUrl, HttpHeaders headers) { String signature = computeHmac256(this.credential.getAzureNamedKey().getKey(), buildStringToSign(requestUrl, headers, this.credential)); return String.format(AUTHORIZATION_HEADER_FORMAT, this.credential.getAzureNamedKey().getName(), signature); } /** * Creates the String to Sign. * * @param requestUrl The Url which the request is going to. * @param headers The headers of the request. * * @return A string to sign for the request. */ /** * Returns a header value or an empty string if said value is {@code null}. * * @param headers The request headers. * @param headerName The name of the header to get the value for. * * @return The standard header for the given name. 
*/ private static String getStandardHeaderValue(HttpHeaders headers, String headerName) { final Header header = headers.get(headerName); return header == null ? "" : header.getValue(); } /** * Returns the canonicalized resource needed for a request. * * @param requestUrl The URL of the request. * * @return The string that is the canonicalized resource. */ private static String getCanonicalizedResource(URL requestUrl, AzureNamedKeyCredential credential) { StringBuilder canonicalizedResource = new StringBuilder("/").append(credential.getAzureNamedKey().getName()); if (requestUrl.getPath().length() > 0) { canonicalizedResource.append(requestUrl.getPath()); } else { canonicalizedResource.append('/'); } if (requestUrl.getQuery() != null) { Map<String, String[]> queryParams = parseQueryStringSplitValues(requestUrl.getQuery()); String[] queryParamValues = queryParams.get("comp"); if (queryParamValues != null) { Arrays.sort(queryParamValues); canonicalizedResource.append("?comp=") .append(String.join(",", queryParamValues)); } } return canonicalizedResource.toString(); } /** * Get the {@link AzureNamedKeyCredential} linked to the policy. * * @return The {@link AzureNamedKeyCredential}. */ public AzureNamedKeyCredential getCredential() { return credential; } }
class TableAzureNamedKeyCredentialPolicy implements HttpPipelinePolicy { private static final String AUTHORIZATION_HEADER_FORMAT = "SharedKeyLite %s:%s"; private final AzureNamedKeyCredential credential; /** * Creates a SharedKey pipeline policy that adds the SharedKey into the request's authorization header. * * @param credential The SharedKey credential used to create the policy. */ public TableAzureNamedKeyCredentialPolicy(AzureNamedKeyCredential credential) { this.credential = credential; } /** * Authorizes a {@link com.azure.core.http.HttpRequest} with the SharedKey credential. * * @param context The context of the request. * @param next The next policy in the pipeline. * * @return A reactive result containing the HTTP response. */ public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String authorizationValue = generateAuthorizationHeader(context.getHttpRequest().getUrl(), context.getHttpRequest().getHeaders()); context.getHttpRequest().setHeader("Authorization", authorizationValue); return next.process(); } /** * Generates the Auth Headers * * @param requestUrl The URL which the request is going to. * @param headers The headers of the request. * * @return The auth header */ String generateAuthorizationHeader(URL requestUrl, HttpHeaders headers) { String signature = computeHmac256(this.credential.getAzureNamedKey().getKey(), buildStringToSign(requestUrl, headers, this.credential)); return String.format(AUTHORIZATION_HEADER_FORMAT, this.credential.getAzureNamedKey().getName(), signature); } /** * Creates the String to Sign. * * @param requestUrl The Url which the request is going to. * @param headers The headers of the request. * * @return A string to sign for the request. */ /** * Returns a header value or an empty string if said value is {@code null}. * * @param headers The request headers. * @param headerName The name of the header to get the value for. * * @return The standard header for the given name. 
*/ private static String getStandardHeaderValue(HttpHeaders headers, String headerName) { final Header header = headers.get(headerName); return header == null ? "" : header.getValue(); } /** * Returns the canonicalized resource needed for a request. * * @param requestUrl The URL of the request. * * @return The string that is the canonicalized resource. */ private static String getCanonicalizedResource(URL requestUrl, AzureNamedKeyCredential credential) { StringBuilder canonicalizedResource = new StringBuilder("/").append(credential.getAzureNamedKey().getName()); if (requestUrl.getPath().length() > 0) { canonicalizedResource.append(requestUrl.getPath()); } else { canonicalizedResource.append('/'); } if (requestUrl.getQuery() != null) { Map<String, String[]> queryParams = parseQueryStringSplitValues(requestUrl.getQuery()); String[] queryParamValues = queryParams.get("comp"); if (queryParamValues != null) { Arrays.sort(queryParamValues); canonicalizedResource.append("?comp=") .append(String.join(",", queryParamValues)); } } return canonicalizedResource.toString(); } /** * Get the {@link AzureNamedKeyCredential} linked to the policy. * * @return The {@link AzureNamedKeyCredential}. */ public AzureNamedKeyCredential getCredential() { return credential; } }
I'm wondering if, for now, it'd be better to always go down the code path above to just have this being consistent and easier to debug while we introduce this change
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, writeTimeout); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Is this timeout in the right place? I'd have expected the write timeout to behave closer to the network layer or in the `sink.write` above. Maybe ```java Mono<BufferedSink> requestSendMono = content.toFluxByteBuffer() .publishOn(Schedulers.boundedElastic()) .timeout(writeTimeout) .reduce(....); ```
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<BufferedSink> requestSendMono = content.toFluxByteBuffer() .publishOn(Schedulers.boundedElastic()) .reduce(bufferedSink, (sink, buffer) -> { try { while (buffer.hasRemaining()) { sink.write(buffer); } return sink; } catch (IOException e) { throw Exceptions.propagate(e); } }); if (writeTimeout != null) { requestSendMono.block(writeTimeout); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
requestSendMono.block(writeTimeout);
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<Void> requestSendMono = content.toFluxByteBuffer() .flatMapSequential(buffer -> { if (Schedulers.isInNonBlockingThread()) { return Mono.just(buffer) .publishOn(Schedulers.boundedElastic()) .map(b -> writeBuffer(bufferedSink, b)) .then(); } else { writeBuffer(bufferedSink, buffer); return Mono.empty(); } }, 1, 1) .then(); if (callTimeoutMillis > 0) { /* * Default call timeout (in milliseconds). By default there is no timeout for complete calls, but * there is for the connection, write, and read actions within a call. */ requestSendMono.block(Duration.ofMillis(callTimeoutMillis)); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final Duration writeTimeout; public OkHttpFluxRequestBody( BinaryDataContent content, HttpHeaders httpHeaders, MediaType mediaType, Duration writeTimeout) { super(content, httpHeaders, mediaType); this.writeTimeout = writeTimeout; } @Override }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final int callTimeoutMillis; public OkHttpFluxRequestBody( BinaryDataContent content, long effectiveContentLength, MediaType mediaType, int callTimeoutMillis) { super(content, effectiveContentLength, mediaType); this.callTimeoutMillis = callTimeoutMillis; } @Override private ByteBuffer writeBuffer(BufferedSink sink, ByteBuffer buffer) { try { while (buffer.hasRemaining()) { sink.write(buffer); } return buffer; } catch (IOException e) { throw Exceptions.propagate(e); } } }
If this path is removed, could this API become non-reactive?
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, writeTimeout); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Not at 7% perf penatly :/ (this is still under benchmarking but this is how it looks like atm)
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, writeTimeout); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Yes. This is how it was for a moment before I brought it back.
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, writeTimeout); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
This Javadoc sounds to me like the write timeout applies to the whole outbound operation. https://github.com/Azure/azure-sdk-for-java/blob/afde57efa23a2232d90319d1125a91b765622dc7/sdk/core/azure-core-http-okhttp/src/main/java/com/azure/core/http/okhttp/OkHttpAsyncHttpClientBuilder.java#L123-L139
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<BufferedSink> requestSendMono = content.toFluxByteBuffer() .publishOn(Schedulers.boundedElastic()) .reduce(bufferedSink, (sink, buffer) -> { try { while (buffer.hasRemaining()) { sink.write(buffer); } return sink; } catch (IOException e) { throw Exceptions.propagate(e); } }); if (writeTimeout != null) { requestSendMono.block(writeTimeout); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
requestSendMono.block(writeTimeout);
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<Void> requestSendMono = content.toFluxByteBuffer() .flatMapSequential(buffer -> { if (Schedulers.isInNonBlockingThread()) { return Mono.just(buffer) .publishOn(Schedulers.boundedElastic()) .map(b -> writeBuffer(bufferedSink, b)) .then(); } else { writeBuffer(bufferedSink, buffer); return Mono.empty(); } }, 1, 1) .then(); if (callTimeoutMillis > 0) { /* * Default call timeout (in milliseconds). By default there is no timeout for complete calls, but * there is for the connection, write, and read actions within a call. */ requestSendMono.block(Duration.ofMillis(callTimeoutMillis)); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final Duration writeTimeout; public OkHttpFluxRequestBody( BinaryDataContent content, HttpHeaders httpHeaders, MediaType mediaType, Duration writeTimeout) { super(content, httpHeaders, mediaType); this.writeTimeout = writeTimeout; } @Override }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final int callTimeoutMillis; public OkHttpFluxRequestBody( BinaryDataContent content, long effectiveContentLength, MediaType mediaType, int callTimeoutMillis) { super(content, effectiveContentLength, mediaType); this.callTimeoutMillis = callTimeoutMillis; } @Override private ByteBuffer writeBuffer(BufferedSink sink, ByteBuffer buffer) { try { while (buffer.hasRemaining()) { sink.write(buffer); } return buffer; } catch (IOException e) { throw Exceptions.propagate(e); } } }
This is getting replaced with `callTimeout`. The write timeout is behaving properly and terminates this whenever the socket is not fast enough. The OkHttp client docs have better wording for the write timeout.
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<BufferedSink> requestSendMono = content.toFluxByteBuffer() .publishOn(Schedulers.boundedElastic()) .reduce(bufferedSink, (sink, buffer) -> { try { while (buffer.hasRemaining()) { sink.write(buffer); } return sink; } catch (IOException e) { throw Exceptions.propagate(e); } }); if (writeTimeout != null) { requestSendMono.block(writeTimeout); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
requestSendMono.block(writeTimeout);
public void writeTo(BufferedSink bufferedSink) throws IOException { if (bodySent.compareAndSet(false, true)) { Mono<Void> requestSendMono = content.toFluxByteBuffer() .flatMapSequential(buffer -> { if (Schedulers.isInNonBlockingThread()) { return Mono.just(buffer) .publishOn(Schedulers.boundedElastic()) .map(b -> writeBuffer(bufferedSink, b)) .then(); } else { writeBuffer(bufferedSink, buffer); return Mono.empty(); } }, 1, 1) .then(); if (callTimeoutMillis > 0) { /* * Default call timeout (in milliseconds). By default there is no timeout for complete calls, but * there is for the connection, write, and read actions within a call. */ requestSendMono.block(Duration.ofMillis(callTimeoutMillis)); } else { requestSendMono.block(); } } else { throw LOGGER.logThrowableAsError(new IOException("Re-attempt to send Flux body is not supported")); } }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final Duration writeTimeout; public OkHttpFluxRequestBody( BinaryDataContent content, HttpHeaders httpHeaders, MediaType mediaType, Duration writeTimeout) { super(content, httpHeaders, mediaType); this.writeTimeout = writeTimeout; } @Override }
class OkHttpFluxRequestBody extends OkHttpStreamableRequestBody<BinaryDataContent> { private static final ClientLogger LOGGER = new ClientLogger(OkHttpFluxRequestBody.class); private final AtomicBoolean bodySent = new AtomicBoolean(false); private final int callTimeoutMillis; public OkHttpFluxRequestBody( BinaryDataContent content, long effectiveContentLength, MediaType mediaType, int callTimeoutMillis) { super(content, effectiveContentLength, mediaType); this.callTimeoutMillis = callTimeoutMillis; } @Override private ByteBuffer writeBuffer(BufferedSink sink, ByteBuffer buffer) { try { while (buffer.hasRemaining()) { sink.write(buffer); } return buffer; } catch (IOException e) { throw Exceptions.propagate(e); } } }
BTW, this code branch replaces the previous Flux processing, so callers get this right away. File/InputStream/Serializable will be dormant for a while.
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, writeTimeout); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Does `RequestBody` copy the `byte[]` contents? If it does we should turn this into a `Mono.fromCallable` just on the off chance the reactive stream gets cancelled before the `RequestBody` is used.
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 * @return the Mono emitting the okhttp request body
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);

    // No body supplied: send an empty request body.
    if (bodyContent == null) {
        return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType));
    }

    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already materialized; wrap them immediately.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer the conversion to bytes until subscription time.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType));
    }
    if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType));
    }

    // Reactive stream content: stream large or unknown-length bodies, buffer small ones.
    OkHttpFluxRequestBody streamingBody = new OkHttpFluxRequestBody(content, headers, mediaType, httpClient);
    long length = streamingBody.contentLength();
    if (length < 0 || length > BUFFERED_FLUX_REQUEST_THRESHOLD) {
        return Mono.just(streamingBody);
    }
    return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
}
return Mono.just(RequestBody.create(content.toBytes(), mediaType));
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 * @return the Mono emitting the okhttp request body
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);

    // No body supplied: reuse the shared empty RequestBody.
    if (bodyContent == null) {
        return EMPTY_REQUEST_BODY_MONO;
    }

    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already materialized; wrap them immediately.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer the conversion to bytes until subscription time.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    }

    // Remaining content kinds need an explicit length for the specialized request bodies.
    long effectiveContentLength = getRequestContentLength(content, headers);
    if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody(
            (InputStreamContent) content, effectiveContentLength, mediaType));
    }
    if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType));
    }
    // Fall back to streaming the reactive content.
    return Mono.just(new OkHttpFluxRequestBody(
        content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis()));
}
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Sounds good, and longer term once there is a full synchronous call stack this won't be a concern anymore.
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 * @return the Mono emitting the okhttp request body
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);

    // No body supplied: send an empty request body.
    if (bodyContent == null) {
        return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType));
    }

    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already materialized; wrap them immediately.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer the conversion to bytes until subscription time.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType));
    }
    if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType));
    }

    // Reactive stream content: stream large or unknown-length bodies, buffer small ones.
    OkHttpFluxRequestBody streamingBody = new OkHttpFluxRequestBody(content, headers, mediaType, writeTimeout);
    long length = streamingBody.contentLength();
    if (length < 0 || length > BUFFERED_FLUX_REQUEST_THRESHOLD) {
        return Mono.just(streamingBody);
    }
    return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
}
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 * @return the Mono emitting the okhttp request body
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);

    // No body supplied: reuse the shared empty RequestBody.
    if (bodyContent == null) {
        return EMPTY_REQUEST_BODY_MONO;
    }

    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already materialized; wrap them immediately.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer the conversion to bytes until subscription time.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    }

    // Remaining content kinds need an explicit length for the specialized request bodies.
    long effectiveContentLength = getRequestContentLength(content, headers);
    if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody(
            (InputStreamContent) content, effectiveContentLength, mediaType));
    }
    if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType));
    }
    // Fall back to streaming the reactive content.
    return Mono.just(new OkHttpFluxRequestBody(
        content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis()));
}
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 10 * 1024L; final OkHttpClient httpClient; private final Duration writeTimeout; OkHttpAsyncHttpClient(OkHttpClient httpClient, Duration writeTimeout) { this.httpClient = httpClient; this.writeTimeout = writeTimeout; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. * * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the 
body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Shouldn't this be pushed into the `if` block, with the `instanceof` inspection performed on the content directly?
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 * @return the Mono emitting the okhttp request body
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);

    // No body supplied: send an empty request body.
    if (bodyContent == null) {
        return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType));
    }

    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already materialized; wrap them immediately.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer the conversion to bytes until subscription time.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType));
    }
    if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType));
    }

    // Reactive stream content: stream large or unknown-length bodies, buffer small ones.
    OkHttpFluxRequestBody streamingBody = new OkHttpFluxRequestBody(content, headers, mediaType, httpClient);
    long length = streamingBody.contentLength();
    if (length < 0 || length > BUFFERED_FLUX_REQUEST_THRESHOLD) {
        return Mono.just(streamingBody);
    }
    return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType));
}
OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody(
/**
 * Creates a Mono of okhttp3.RequestBody from the given BinaryData.
 *
 * @param bodyContent The request body content
 * @param headers the headers associated with the original request
 * @return the Mono emitting the okhttp request body
 */
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) {
    String contentType = headers.getValue("Content-Type");
    MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType);

    // No body supplied: reuse the shared empty RequestBody.
    if (bodyContent == null) {
        return EMPTY_REQUEST_BODY_MONO;
    }

    BinaryDataContent content = BinaryDataHelper.getContent(bodyContent);
    if (content instanceof ByteArrayContent) {
        // Bytes are already materialized; wrap them immediately.
        return Mono.just(RequestBody.create(content.toBytes(), mediaType));
    }
    if (content instanceof StringContent || content instanceof SerializableContent) {
        // Defer the conversion to bytes until subscription time.
        return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType));
    }

    // Remaining content kinds need an explicit length for the specialized request bodies.
    long effectiveContentLength = getRequestContentLength(content, headers);
    if (content instanceof InputStreamContent) {
        return Mono.just(new OkHttpInputStreamRequestBody(
            (InputStreamContent) content, effectiveContentLength, mediaType));
    }
    if (content instanceof FileContent) {
        return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType));
    }
    // Fall back to streaming the reactive content.
    return Mono.just(new OkHttpFluxRequestBody(
        content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis()));
}
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
I agree, I don't like this flow either. I'll refactor this.
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, httpClient); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody(
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
It doesn't.
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, httpClient); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
return Mono.just(RequestBody.create(content.toBytes(), mediaType));
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
![image](https://user-images.githubusercontent.com/61715331/169103237-fb2c1eb3-6040-4e39-abd4-986937bda383.png)
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return Mono.just(RequestBody.create(ByteString.EMPTY, mediaType)); } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody((InputStreamContent) content, headers, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, headers, mediaType)); } else { OkHttpFluxRequestBody fluxRequestBody = new OkHttpFluxRequestBody( content, headers, mediaType, httpClient); if (fluxRequestBody.contentLength() < 0 || fluxRequestBody.contentLength() > BUFFERED_FLUX_REQUEST_THRESHOLD) { return Mono.just(fluxRequestBody); } else { return toByteString(bodyContent.toFluxByteBuffer()).map(bs -> RequestBody.create(bs, mediaType)); } } }
return Mono.just(RequestBody.create(content.toBytes(), mediaType));
private Mono<RequestBody> toOkHttpRequestBody(BinaryData bodyContent, HttpHeaders headers) { String contentType = headers.getValue("Content-Type"); MediaType mediaType = (contentType == null) ? null : MediaType.parse(contentType); if (bodyContent == null) { return EMPTY_REQUEST_BODY_MONO; } BinaryDataContent content = BinaryDataHelper.getContent(bodyContent); if (content instanceof ByteArrayContent) { return Mono.just(RequestBody.create(content.toBytes(), mediaType)); } else if (content instanceof StringContent || content instanceof SerializableContent) { return Mono.fromCallable(() -> RequestBody.create(content.toBytes(), mediaType)); } else { long effectiveContentLength = getRequestContentLength(content, headers); if (content instanceof InputStreamContent) { return Mono.just(new OkHttpInputStreamRequestBody( (InputStreamContent) content, effectiveContentLength, mediaType)); } else if (content instanceof FileContent) { return Mono.just(new OkHttpFileRequestBody((FileContent) content, effectiveContentLength, mediaType)); } else { return Mono.just(new OkHttpFluxRequestBody( content, effectiveContentLength, mediaType, httpClient.callTimeoutMillis())); } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<okio.ByteString> EMPTY_BYTE_STRING_MONO = Mono.just(okio.ByteString.EMPTY); /** * This constant defines a size of Flux based request where buffering in memory becomes less performant * than streaming (which involves thread hops). The value has been established experimentally using * Storage benchmarks. */ private static final long BUFFERED_FLUX_REQUEST_THRESHOLD = 100 * 1024L; final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. 
* * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. * * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ /** * Aggregate Flux of java.nio.ByteBuffer to single okio.ByteString. * * Pooled okio.Buffer type is used to buffer emitted ByteBuffer instances. Content of each ByteBuffer will be * written (i.e copied) to the internal okio.Buffer slots. Once the stream terminates, the contents of all slots get * copied to one single byte array and okio.ByteString will be created referring this byte array. Finally, the * initial okio.Buffer will be returned to the pool. 
* * @param bbFlux the Flux of ByteBuffer to aggregate * @return a mono emitting aggregated ByteString */ private static Mono<ByteString> toByteString(Flux<ByteBuffer> bbFlux) { Objects.requireNonNull(bbFlux, "'bbFlux' cannot be null."); return Mono.using(okio.Buffer::new, buffer -> bbFlux.reduce(buffer, (b, byteBuffer) -> { try { b.write(byteBuffer); return b; } catch (IOException ioe) { throw Exceptions.propagate(ioe); } }).map(b -> ByteString.of(b.readByteArray())), okio.Buffer::clear) .switchIfEmpty(EMPTY_BYTE_STRING_MONO); } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { if (e.getMessage().startsWith("canceled due to") && e.getSuppressed().length == 1) { sink.error(e.getSuppressed()[0]); } else { sink.error(e); } } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
class OkHttpAsyncHttpClient implements HttpClient { private static final Mono<RequestBody> EMPTY_REQUEST_BODY_MONO = Mono.just(RequestBody.create(new byte[0])); final OkHttpClient httpClient; OkHttpAsyncHttpClient(OkHttpClient httpClient) { this.httpClient = httpClient; } @Override public Mono<HttpResponse> send(HttpRequest request) { return send(request, Context.NONE); } @Override public Mono<HttpResponse> send(HttpRequest request, Context context) { boolean eagerlyReadResponse = (boolean) context.getData("azure-eagerly-read-response").orElse(false); return Mono.create(sink -> sink.onRequest(value -> { toOkHttpRequest(request).subscribe(okHttpRequest -> { try { Call call = httpClient.newCall(okHttpRequest); call.enqueue(new OkHttpCallback(sink, request, eagerlyReadResponse)); sink.onCancel(call::cancel); } catch (Exception ex) { sink.error(ex); } }, sink::error); })); } /** * Converts the given azure-core request to okhttp request. * * @param request the azure-core request * @return the Mono emitting okhttp request */ private Mono<okhttp3.Request> toOkHttpRequest(HttpRequest request) { Request.Builder requestBuilder = new Request.Builder() .url(request.getUrl()); if (request.getHeaders() != null) { for (HttpHeader hdr : request.getHeaders()) { hdr.getValuesList().forEach(value -> requestBuilder.addHeader(hdr.getName(), value)); } } if (request.getHttpMethod() == HttpMethod.GET) { return Mono.just(requestBuilder.get().build()); } else if (request.getHttpMethod() == HttpMethod.HEAD) { return Mono.just(requestBuilder.head().build()); } return toOkHttpRequestBody(request.getBodyAsBinaryData(), request.getHeaders()) .map(okhttpRequestBody -> requestBuilder.method(request.getHttpMethod().toString(), okhttpRequestBody) .build()); } /** * Create a Mono of okhttp3.RequestBody from the given BinaryData. 
* * @param bodyContent The request body content * @param headers the headers associated with the original request * @return the Mono emitting okhttp request */ private static long getRequestContentLength(BinaryDataContent content, HttpHeaders headers) { Long contentLength = content.getLength(); if (contentLength == null) { String contentLengthHeaderValue = headers.getValue("Content-Length"); if (contentLengthHeaderValue != null) { contentLength = Long.parseLong(contentLengthHeaderValue); } else { contentLength = -1L; } } return contentLength; } private static class OkHttpCallback implements okhttp3.Callback { private final MonoSink<HttpResponse> sink; private final HttpRequest request; private final boolean eagerlyReadResponse; OkHttpCallback(MonoSink<HttpResponse> sink, HttpRequest request, boolean eagerlyReadResponse) { this.sink = sink; this.request = request; this.eagerlyReadResponse = eagerlyReadResponse; } @SuppressWarnings("NullableProblems") @Override public void onFailure(okhttp3.Call call, IOException e) { sink.error(e); } @SuppressWarnings("NullableProblems") @Override public void onResponse(okhttp3.Call call, okhttp3.Response response) { /* * Use a buffered response when we are eagerly reading the response from the network and the body isn't * empty. */ if (eagerlyReadResponse) { ResponseBody body = response.body(); if (Objects.nonNull(body)) { try { byte[] bytes = body.bytes(); body.close(); sink.success(new OkHttpAsyncBufferedResponse(response, request, bytes)); } catch (IOException ex) { sink.error(ex); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } else { sink.success(new OkHttpAsyncResponse(response, request)); } } } }
Is it conventional to use `Context.NONE` as the value here, even though it's a different type from what `SPAN_CONTEXT_KEY` normally holds?
/**
 * Starts and ends an "Azure.EventHubs.message" span for the event and stores the diagnostic id as a
 * message property so downstream consumers can correlate the trace.
 *
 * @param eventData The event to add a tracing span for.
 * @return The same {@link EventData} instance, with tracing context attached when a span was created.
 */
private EventData traceMessageSpan(EventData eventData) {
    Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY);
    if (eventContextData.isPresent()) {
        // The event was already instrumented (e.g. re-offered after a failed tryAdd); don't start a second span.
        return eventData;
    }

    Context eventContext = eventData.getContext()
        .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE)
        .addData(ENTITY_PATH_KEY, this.entityPath)
        .addData(HOST_NAME_KEY, this.hostname);
    eventContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, eventContext, ProcessKind.MESSAGE);

    Optional<Object> eventDiagnosticIdOptional = eventContext.getData(DIAGNOSTIC_ID_KEY);
    if (eventDiagnosticIdOptional.isPresent()) {
        eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
        tracerProvider.endSpan(eventContext, Signal.complete());

        // Only propagate a real span context. The previous fallback to Context.NONE stored a value of the
        // wrong type under SPAN_CONTEXT_KEY, which also made the "already traced" guard above fire for
        // events that never actually carried a span.
        Object spanContext = eventContext.getData(SPAN_CONTEXT_KEY).orElse(null);
        if (spanContext != null) {
            eventData.addContext(SPAN_CONTEXT_KEY, spanContext);
        }
    }

    return eventData;
}
eventData.addContext(SPAN_CONTEXT_KEY, eventContext.getData(SPAN_CONTEXT_KEY).orElse(Context.NONE));
/**
 * Starts and ends an "Azure.EventHubs.message" span for the event and stores the diagnostic id as a
 * message property so downstream consumers can correlate the trace.
 *
 * @param eventData The event to add a tracing span for.
 * @return The same event data instance, with tracing context attached when a span was created.
 */
private EventData traceMessageSpan(EventData eventData) {
    Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY);
    if (eventContextData.isPresent()) {
        // The event has already been instrumented; do not start a second span.
        return eventData;
    } else {
        Context eventContext = eventData.getContext()
            .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE)
            .addData(ENTITY_PATH_KEY, this.entityPath)
            .addData(HOST_NAME_KEY, this.hostname);
        eventContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, eventContext, ProcessKind.MESSAGE);
        Optional<Object> eventDiagnosticIdOptional = eventContext.getData(DIAGNOSTIC_ID_KEY);
        if (eventDiagnosticIdOptional.isPresent()) {
            eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
            tracerProvider.endSpan(eventContext, Signal.complete());
            // Only attach a real span context; storing a placeholder such as Context.NONE would put a
            // value of the wrong type under SPAN_CONTEXT_KEY.
            Object spanContext = eventContext.getData(SPAN_CONTEXT_KEY).orElse(null);
            if (spanContext != null) {
                eventData.addContext(SPAN_CONTEXT_KEY, spanContext);
            }
        }
    }
    return eventData;
}
class EventDataBatch { private static final ClientLogger LOGGER = new ClientLogger(EventDataBatch.class); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the maximum size, in bytes, of the {@link EventDataBatch}. * * @return The maximum size, in bytes, of the {@link EventDataBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * <p>This method is not thread-safe; make sure to synchronize the method access when using multiple threads * to add events.</p> * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. 
* @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw LOGGER.logExceptionAsWarning(new NullPointerException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw LOGGER.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private static Message createAmqpMessage(EventData event, String partitionKey) { final AmqpAnnotatedMessage amqpAnnotatedMessage = event.getRawAmqpMessage(); final Message protonJ = MessageUtils.toProtonJMessage(amqpAnnotatedMessage); if (partitionKey == null) { return protonJ; } if (protonJ.getMessageAnnotations() == null) { protonJ.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); } final MessageAnnotations messageAnnotations = protonJ.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); return protonJ; } }
class EventDataBatch { private static final ClientLogger LOGGER = new ClientLogger(EventDataBatch.class); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the maximum size, in bytes, of the {@link EventDataBatch}. * * @return The maximum size, in bytes, of the {@link EventDataBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * <p>This method is not thread-safe; make sure to synchronize the method access when using multiple threads * to add events.</p> * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. 
* @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw LOGGER.logExceptionAsWarning(new NullPointerException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw LOGGER.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private static Message createAmqpMessage(EventData event, String partitionKey) { final AmqpAnnotatedMessage amqpAnnotatedMessage = event.getRawAmqpMessage(); final Message protonJ = MessageUtils.toProtonJMessage(amqpAnnotatedMessage); if (partitionKey == null) { return protonJ; } if (protonJ.getMessageAnnotations() == null) { protonJ.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); } final MessageAnnotations messageAnnotations = protonJ.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); return protonJ; } }
Good catch — I'll fix it.
/**
 * Starts and ends an "Azure.EventHubs.message" span for the event and stores the diagnostic id as a
 * message property so downstream consumers can correlate the trace.
 *
 * @param eventData The event to add a tracing span for.
 * @return The same {@link EventData} instance, with tracing context attached when a span was created.
 */
private EventData traceMessageSpan(EventData eventData) {
    Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY);
    if (eventContextData.isPresent()) {
        // The event was already instrumented (e.g. re-offered after a failed tryAdd); don't start a second span.
        return eventData;
    }

    Context eventContext = eventData.getContext()
        .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE)
        .addData(ENTITY_PATH_KEY, this.entityPath)
        .addData(HOST_NAME_KEY, this.hostname);
    eventContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, eventContext, ProcessKind.MESSAGE);

    Optional<Object> eventDiagnosticIdOptional = eventContext.getData(DIAGNOSTIC_ID_KEY);
    if (eventDiagnosticIdOptional.isPresent()) {
        eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
        tracerProvider.endSpan(eventContext, Signal.complete());

        // Only propagate a real span context. The previous fallback to Context.NONE stored a value of the
        // wrong type under SPAN_CONTEXT_KEY, which also made the "already traced" guard above fire for
        // events that never actually carried a span.
        Object spanContext = eventContext.getData(SPAN_CONTEXT_KEY).orElse(null);
        if (spanContext != null) {
            eventData.addContext(SPAN_CONTEXT_KEY, spanContext);
        }
    }

    return eventData;
}
eventData.addContext(SPAN_CONTEXT_KEY, eventContext.getData(SPAN_CONTEXT_KEY).orElse(Context.NONE));
/**
 * Starts and ends an "Azure.EventHubs.message" span for the event and stores the diagnostic id as a
 * message property so downstream consumers can correlate the trace.
 *
 * @param eventData The event to add a tracing span for.
 * @return The same event data instance, with tracing context attached when a span was created.
 */
private EventData traceMessageSpan(EventData eventData) {
    Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY);
    if (eventContextData.isPresent()) {
        // The event has already been instrumented; do not start a second span.
        return eventData;
    } else {
        Context eventContext = eventData.getContext()
            .addData(AZ_TRACING_NAMESPACE_KEY, AZ_NAMESPACE_VALUE)
            .addData(ENTITY_PATH_KEY, this.entityPath)
            .addData(HOST_NAME_KEY, this.hostname);
        eventContext = tracerProvider.startSpan(AZ_TRACING_SERVICE_NAME, eventContext, ProcessKind.MESSAGE);
        Optional<Object> eventDiagnosticIdOptional = eventContext.getData(DIAGNOSTIC_ID_KEY);
        if (eventDiagnosticIdOptional.isPresent()) {
            eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
            tracerProvider.endSpan(eventContext, Signal.complete());
            // Only attach a real span context; storing a placeholder such as Context.NONE would put a
            // value of the wrong type under SPAN_CONTEXT_KEY.
            Object spanContext = eventContext.getData(SPAN_CONTEXT_KEY).orElse(null);
            if (spanContext != null) {
                eventData.addContext(SPAN_CONTEXT_KEY, spanContext);
            }
        }
    }
    return eventData;
}
class EventDataBatch { private static final ClientLogger LOGGER = new ClientLogger(EventDataBatch.class); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the maximum size, in bytes, of the {@link EventDataBatch}. * * @return The maximum size, in bytes, of the {@link EventDataBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * <p>This method is not thread-safe; make sure to synchronize the method access when using multiple threads * to add events.</p> * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. 
* @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw LOGGER.logExceptionAsWarning(new NullPointerException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw LOGGER.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private static Message createAmqpMessage(EventData event, String partitionKey) { final AmqpAnnotatedMessage amqpAnnotatedMessage = event.getRawAmqpMessage(); final Message protonJ = MessageUtils.toProtonJMessage(amqpAnnotatedMessage); if (partitionKey == null) { return protonJ; } if (protonJ.getMessageAnnotations() == null) { protonJ.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); } final MessageAnnotations messageAnnotations = protonJ.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); return protonJ; } }
class EventDataBatch { private static final ClientLogger LOGGER = new ClientLogger(EventDataBatch.class); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; private final String entityPath; private final String hostname; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider, String entityPath, String hostname) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; this.entityPath = entityPath; this.hostname = hostname; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the maximum size, in bytes, of the {@link EventDataBatch}. * * @return The maximum size, in bytes, of the {@link EventDataBatch}. */ public int getMaxSizeInBytes() { return maxMessageSize; } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * <p>This method is not thread-safe; make sure to synchronize the method access when using multiple threads * to add events.</p> * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. 
* @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw LOGGER.logExceptionAsWarning(new NullPointerException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw LOGGER.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. 
*/ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private static Message createAmqpMessage(EventData event, String partitionKey) { final AmqpAnnotatedMessage amqpAnnotatedMessage = event.getRawAmqpMessage(); final Message protonJ = MessageUtils.toProtonJMessage(amqpAnnotatedMessage); if (partitionKey == null) { return protonJ; } if (protonJ.getMessageAnnotations() == null) { protonJ.setMessageAnnotations(new MessageAnnotations(new HashMap<>())); } final MessageAnnotations messageAnnotations = protonJ.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); return protonJ; } }
Are we changing the logger level to `debug` because we expect this race condition to be hit more often?
/**
 * Returns the registered {@link PartitionKeyAccessor}, first forcing {@link PartitionKey} to be
 * class-loaded if the registration flag is still unset (loading the class is expected to register
 * the accessor via its static initialization — TODO confirm against PartitionKey).
 *
 * @return the registered accessor; never {@code null} (the process is terminated if registration failed).
 */
public static PartitionKeyAccessor getPartitionKeyAccessor() {
    if (!partitionKeyClassLoaded.get()) {
        // debug rather than warn/error: reaching this branch is an expected, benign start-up race,
        // not a failure.
        logger.debug("Initializing PartitionKeyAccessor...");
        PartitionKey.doNothingButEnsureLoadingClass();
    }
    PartitionKeyAccessor snapshot = accessor.get();
    if (snapshot == null) {
        // NOTE(review): System.exit in library code is drastic — presumably this state is considered
        // unrecoverable; confirm that throwing an exception would not be preferable.
        logger.error("PartitionKeyAccessor is not initialized yet!");
        System.exit(9701);
    }
    return snapshot;
}
logger.debug("Initializing PartitionKeyAccessor...");
/**
 * Returns the registered {@link PartitionKeyAccessor}, first forcing {@link PartitionKey} to be
 * class-loaded if the registration flag is still unset (loading the class is expected to register
 * the accessor via its static initialization — TODO confirm against PartitionKey).
 *
 * @return the registered accessor; never {@code null} (the process is terminated if registration failed).
 */
public static PartitionKeyAccessor getPartitionKeyAccessor() {
    if (!partitionKeyClassLoaded.get()) {
        // debug rather than warn/error: reaching this branch is an expected, benign start-up race,
        // not a failure.
        logger.debug("Initializing PartitionKeyAccessor...");
        PartitionKey.doNothingButEnsureLoadingClass();
    }
    PartitionKeyAccessor snapshot = accessor.get();
    if (snapshot == null) {
        // NOTE(review): System.exit in library code is drastic — presumably this state is considered
        // unrecoverable; confirm that throwing an exception would not be preferable.
        logger.error("PartitionKeyAccessor is not initialized yet!");
        System.exit(9701);
    }
    return snapshot;
}
/**
 * Bridge that exposes {@link PartitionKey} conversion through an accessor registered by
 * {@code PartitionKey}'s own code (presumably during its class initialization — TODO confirm).
 */
class PartitionKeyHelper {
    // Flipped to true only after the accessor has been successfully registered.
    private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false);
    private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>();

    // Static bridge class; no instances.
    private PartitionKeyHelper() {}

    /**
     * Registers the accessor exactly once; subsequent calls are ignored.
     *
     * @param newAccessor the accessor implementation to register.
     */
    public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) {
        if (!accessor.compareAndSet(null, newAccessor)) {
            // Benign: another thread (or an earlier class load) already registered the accessor.
            logger.debug("PartitionKeyAccessor already initialized!");
        } else {
            logger.info("Setting PartitionKeyAccessor...");
            // The flag is set only AFTER the accessor reference is published, so a reader that
            // observes the flag as true is guaranteed to find a non-null accessor.
            partitionKeyClassLoaded.set(true);
        }
    }

    // Accessor surface implemented where PartitionKey's non-public state is reachable.
    public interface PartitionKeyAccessor {
        PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal);
    }
}
/**
 * Bridge that exposes {@link PartitionKey} conversion through an accessor registered by
 * {@code PartitionKey}'s own code (presumably during its class initialization — TODO confirm).
 */
class PartitionKeyHelper {
    // Flipped to true only after the accessor has been successfully registered.
    private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false);
    private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>();

    // Static bridge class; no instances.
    private PartitionKeyHelper() {}

    /**
     * Registers the accessor exactly once; subsequent calls are ignored.
     *
     * @param newAccessor the accessor implementation to register.
     */
    public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) {
        if (!accessor.compareAndSet(null, newAccessor)) {
            // Benign: another thread (or an earlier class load) already registered the accessor.
            logger.debug("PartitionKeyAccessor already initialized!");
        } else {
            logger.info("Setting PartitionKeyAccessor...");
            // The flag is set only AFTER the accessor reference is published, so a reader that
            // observes the flag as true is guaranteed to find a non-null accessor.
            partitionKeyClassLoaded.set(true);
        }
    }

    // Accessor surface implemented where PartitionKey's non-public state is reachable.
    public interface PartitionKeyAccessor {
        PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal);
    }
}
Wouldn't setting the accessor be subject to the same race condition that the getter had?
/**
 * Registers the {@link CosmosClientBuilderAccessor} exactly once; later calls are no-ops.
 *
 * @param newAccessor the accessor implementation to register.
 */
public static void setCosmosClientBuilderAccessor(final CosmosClientBuilderAccessor newAccessor) {
    if (!accessor.compareAndSet(null, newAccessor)) {
        // Benign: another thread (or an earlier class load) already registered the accessor.
        logger.debug("CosmosClientBuilderAccessor already initialized!");
    } else {
        logger.info("Setting CosmosClientBuilderAccessor...");
        // The flag is flipped only AFTER the accessor reference is published via compareAndSet, so a
        // reader that sees the flag as true will always observe a non-null accessor — the getter-side
        // race does not apply in reverse here.
        cosmosClientBuilderClassLoaded.set(true);
    }
}
cosmosClientBuilderClassLoaded.set(true);
/**
 * Registers the {@link CosmosClientBuilderAccessor} exactly once; later calls are no-ops.
 *
 * @param newAccessor the accessor implementation to register.
 */
public static void setCosmosClientBuilderAccessor(final CosmosClientBuilderAccessor newAccessor) {
    if (!accessor.compareAndSet(null, newAccessor)) {
        // Benign: another thread (or an earlier class load) already registered the accessor.
        logger.debug("CosmosClientBuilderAccessor already initialized!");
    } else {
        logger.info("Setting CosmosClientBuilderAccessor...");
        // The flag is flipped only AFTER the accessor reference is published via compareAndSet, so a
        // reader that sees the flag as true will always observe a non-null accessor — the getter-side
        // race does not apply in reverse here.
        cosmosClientBuilderClassLoaded.set(true);
    }
}
/**
 * Bridge that exposes non-public {@link CosmosClientBuilder} state through an accessor registered by
 * {@code CosmosClientBuilder} itself (presumably during its class initialization — TODO confirm).
 */
class CosmosClientBuilderHelper {
    private static final AtomicReference<CosmosClientBuilderAccessor> accessor = new AtomicReference<>();
    // Flipped to true only after the accessor has been successfully registered.
    private static final AtomicBoolean cosmosClientBuilderClassLoaded = new AtomicBoolean(false);

    // Static bridge class; no instances.
    private CosmosClientBuilderHelper() {}

    /**
     * Returns the registered accessor, first forcing {@link CosmosClientBuilder} to be class-loaded if
     * the registration flag is still unset.
     *
     * @return the accessor; never {@code null} (the process exits if registration never happened).
     */
    public static CosmosClientBuilderAccessor getCosmosClientBuilderAccessor() {
        if (!cosmosClientBuilderClassLoaded.get()) {
            // debug level: reaching this branch is an expected, benign start-up race, not an error.
            logger.debug("Initializing CosmosClientBuilderAccessor...");
            CosmosClientBuilder.doNothingButEnsureLoadingClass();
        }
        CosmosClientBuilderAccessor snapshot = accessor.get();
        if (snapshot == null) {
            // NOTE(review): System.exit in a library is drastic; confirm this unrecoverable-state
            // policy is intentional.
            logger.error("CosmosClientBuilderAccessor is not initialized yet!");
            System.exit(9700);
        }
        return snapshot;
    }

    // Accessor surface implemented where CosmosClientBuilder's non-public members are reachable.
    public interface CosmosClientBuilderAccessor {
        void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
                                                   CosmosClientMetadataCachesSnapshot metadataCache);
        CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder);
        void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType);
        ApiType getCosmosClientApiType(CosmosClientBuilder builder);
        ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder);
        Configs getConfigs(CosmosClientBuilder builder);
        ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder);
    }
}
/**
 * Bridge that exposes non-public {@link CosmosClientBuilder} state through an accessor registered by
 * {@code CosmosClientBuilder} itself (presumably during its class initialization — TODO confirm).
 */
class CosmosClientBuilderHelper {
    private static final AtomicReference<CosmosClientBuilderAccessor> accessor = new AtomicReference<>();
    // Flipped to true only after the accessor has been successfully registered.
    private static final AtomicBoolean cosmosClientBuilderClassLoaded = new AtomicBoolean(false);

    // Static bridge class; no instances.
    private CosmosClientBuilderHelper() {}

    /**
     * Returns the registered accessor, first forcing {@link CosmosClientBuilder} to be class-loaded if
     * the registration flag is still unset.
     *
     * @return the accessor; never {@code null} (the process exits if registration never happened).
     */
    public static CosmosClientBuilderAccessor getCosmosClientBuilderAccessor() {
        if (!cosmosClientBuilderClassLoaded.get()) {
            // debug level: reaching this branch is an expected, benign start-up race, not an error.
            logger.debug("Initializing CosmosClientBuilderAccessor...");
            CosmosClientBuilder.doNothingButEnsureLoadingClass();
        }
        CosmosClientBuilderAccessor snapshot = accessor.get();
        if (snapshot == null) {
            // NOTE(review): System.exit in a library is drastic; confirm this unrecoverable-state
            // policy is intentional.
            logger.error("CosmosClientBuilderAccessor is not initialized yet!");
            System.exit(9700);
        }
        return snapshot;
    }

    // Accessor surface implemented where CosmosClientBuilder's non-public members are reachable.
    public interface CosmosClientBuilderAccessor {
        void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder,
                                                   CosmosClientMetadataCachesSnapshot metadataCache);
        CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder);
        void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType);
        ApiType getCosmosClientApiType(CosmosClientBuilder builder);
        ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder);
        Configs getConfigs(CosmosClientBuilder builder);
        ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder);
    }
}
If we are going to turn on warning logs, we should probably provide a code or environment toggle to disable them, so that customers who operate at large scale and cannot immediately upgrade don't get noisy logs. Will coordinate offline on a well-defined strategy here.
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using this builder"); } this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
+ "the version be set to v2 using this builder");
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using the constructor"); } /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private 
AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. 
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private BlobAsyncClient getUnencryptedBlobClient() { BlobClientBuilder builder = new BlobClientBuilder() .endpoint(endpoint) .containerName(containerName) .blobName(blobName) .snapshot(snapshot) .customerProvidedKey( customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey())) .encryptionScope(encryptionScope == null ? 
null : encryptionScope.getEncryptionScope()) .versionId(versionId) .serviceVersion(version) .httpClient(httpClient); if (storageSharedKeyCredential != null) { builder.credential(storageSharedKeyCredential); } else if (tokenCredential != null) { builder.credential(tokenCredential); } else if (azureSasCredential != null) { builder.credential(azureSasCredential); } else if (sasToken != null) { builder.credential(new AzureSasCredential(sasToken)); } return builder.buildAsyncClient(); } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy || currPolicy instanceof FetchEncryptionVersionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); policies.add(0, new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId() : logOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() 
.policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. 
* @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, 
container name, blob name) * * <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p> * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName()); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder containerName(String containerName) { this.containerName = containerName; return this; } /** * Sets the name of the blob. * * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version * of the blob name. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code blobName} is {@code null} */ public EncryptedBlobClientBuilder blobName(String blobName) { this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName, "'blobName' cannot be null."))); return this; } /** * Sets the snapshot identifier of the blob. * * @param snapshot Snapshot identifier for the blob. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the version identifier of the blob. * * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder versionId(String versionId) { this.versionId = versionId; return this; } /** * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. 
Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param httpClient The {@link HttpClient} to use for requests. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { LOGGER.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"); if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) { perCallPolicies.add(pipelinePolicy); } else { perRetryPolicies.add(pipelinePolicy); } return this; } /** * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from * the service. If a {@code logLevel} is not provided, default value of {@link HttpLogDetailLevel * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to * and from the service. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage allowlist log headers and query parameters. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. */ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption version for this client. For any new workloads, using version 2 or above is highly * encouraged as version 1 uses AES/CBC, which is no longer considered secure. For compatibility reasons, the * default value is version 1. * @param version The encryption version. * @return The updated builder. */ public EncryptedBlobClientBuilder encryptionVersion(EncryptionVersion version) { this.encryptionVersion = version; return this; } }
/**
 * Fluent builder for {@code EncryptedBlobClient} / {@code EncryptedBlobAsyncClient}: blob clients
 * whose HTTP pipeline performs client-side envelope encryption/decryption via a
 * {@link BlobDecryptionPolicy}. Configure endpoint/container/blob, exactly one credential, the
 * key-wrapping key (or resolver), and optional pipeline/retry/log settings, then build.
 *
 * NOTE(review): this chunk appears extraction-damaged — the {@code buildEncryptedBlobAsyncClient()}
 * method body is missing below (only its javadoc survives) even though
 * {@code buildEncryptedBlobClient()} calls it; several javadoc {@code @link} targets and code
 * samples are truncated. Confirm against the upstream azure-storage-blob-cryptography source.
 */
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> {

    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);
    // SDK name/version of THIS (cryptography) library, read from its properties file; used to
    // stamp the user agent via BlobUserAgentModificationPolicy.
    private static final Map<String, String> PROPERTIES =
        CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // SDK name/version of the base blob library (USER_AGENT_PROPERTIES is defined elsewhere in
    // this package); used for the UserAgentPolicy of pipelines built here.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION =
        USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");

    // Target blob location.
    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;

    // Encryption behavior.
    private boolean requiresEncryption;
    private EncryptionVersion encryptionVersion;

    // Exactly one credential may be set; setters null out the competing fields.
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;

    // HTTP/pipeline configuration.
    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;

    // Client-side encryption key material: either a wrapping key + algorithm (for upload) or a
    // resolver (for download of existing blobs); see checkValidEncryptionParameters().
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;
    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     *
     * @deprecated Use {@code EncryptedBlobClientBuilder(EncryptionVersion)} so the encryption
     * protocol version is chosen explicitly.
     */
    @Deprecated
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder with an explicit protocol version.
     *
     * @param version The version of the client side encryption protocol to use. It is highly
     * recommended that v2 be preferred for security reasons, though v1 continues to be supported
     * for compatibility. A client configured to encrypt with v2 can still decrypt v1 blobs.
     */
    public EncryptedBlobClientBuilder(EncryptionVersion version) {
        Objects.requireNonNull(version);
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = version;
    }

    /**
     * Creates an {@code EncryptedBlobClient} based on options set in the builder, by wrapping the
     * async client.
     *
     * @return an EncryptedBlobClient created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName}
     * is {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified, or if mutually
     * exclusive retry options have both been set.
     */
    public EncryptedBlobClient buildEncryptedBlobClient() {
        return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
    }

    // NOTE(review): buildEncryptedBlobAsyncClient() is missing from this chunk — presumably lost
    // in extraction. It is called by buildEncryptedBlobClient() above; restore from upstream.

    /**
     * Returns a copy of {@code pipeline} with a {@link BlobUserAgentModificationPolicy} inserted
     * immediately after each {@link UserAgentPolicy}, so requests advertise this cryptography
     * library's name/version in addition to the base blob library's.
     *
     * @param pipeline the pipeline to augment.
     * @return a new pipeline with the same HttpClient and the augmented policy list.
     */
    private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = pipeline.getPolicy(i);
            policies.add(currPolicy);
            if (currPolicy instanceof UserAgentPolicy) {
                policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
            }
        }
        return new HttpPipelineBuilder()
            .httpClient(pipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    /**
     * Builds a plain (non-encrypting) {@link BlobAsyncClient} against the same blob with the same
     * credential and pipeline settings. Used by the decryption policy, e.g. to fetch metadata or
     * ranges it needs while decrypting.
     */
    private BlobAsyncClient getUnencryptedBlobClient() {
        BlobClientBuilder builder = new BlobClientBuilder()
            .endpoint(endpoint)
            .containerName(containerName)
            .blobName(blobName)
            .snapshot(snapshot)
            .customerProvidedKey(
                customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey()))
            .encryptionScope(encryptionScope == null ? null : encryptionScope.getEncryptionScope())
            .versionId(versionId)
            .serviceVersion(version)
            .pipeline(this.httpPipeline)
            .httpClient(httpClient)
            .configuration(configuration)
            .retryOptions(this.retryOptions)
            .clientOptions(this.clientOptions);
        // Forward whichever single credential was configured (validated elsewhere).
        if (storageSharedKeyCredential != null) {
            builder.credential(storageSharedKeyCredential);
        } else if (tokenCredential != null) {
            builder.credential(tokenCredential);
        } else if (azureSasCredential != null) {
            builder.credential(azureSasCredential);
        } else if (sasToken != null) {
            builder.credential(new AzureSasCredential(sasToken));
        }
        for (HttpPipelinePolicy policy : perCallPolicies) {
            builder.addPolicy(policy);
        }
        for (HttpPipelinePolicy policy : perRetryPolicies) {
            builder.addPolicy(policy);
        }
        return builder.buildAsyncClient();
    }

    /**
     * Produces the pipeline for the encrypted client. If a user pipeline was supplied, it is
     * reused with a {@link BlobDecryptionPolicy} prepended (and rejected if it already has one);
     * otherwise a full pipeline is assembled from the builder's settings.
     */
    private HttpPipeline getHttpPipeline() {
        CredentialValidator.validateSingleCredentialIsPresent(
            storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);

        // Case 1: caller supplied a pipeline — prepend decryption, refuse double-configuration.
        if (httpPipeline != null) {
            List<HttpPipelinePolicy> policies = new ArrayList<>();
            boolean decryptionPolicyPresent = false; // NOTE(review): unused local in this chunk.
            for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
                HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
                if (currPolicy instanceof BlobDecryptionPolicy) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                        + " configured for encryption/decryption in a way that might conflict with the passed key "
                        + "information. Please ensure that the passed pipeline is not already configured for "
                        + "encryption/decryption"));
                }
                policies.add(currPolicy);
            }
            // Decryption must run first (position 0) so it sees the raw response body.
            policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
                getUnencryptedBlobClient()));
            return new HttpPipelineBuilder()
                .httpClient(httpPipeline.getHttpClient())
                .policies(policies.toArray(new HttpPipelinePolicy[0]))
                .build();
        }

        // Case 2: assemble a pipeline from scratch.
        Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
            getUnencryptedBlobClient()));
        // ClientOptions application id wins over the (deprecated) HttpLogOptions one.
        String applicationId = clientOptions.getApplicationId() != null
            ? clientOptions.getApplicationId() : logOptions.getApplicationId();
        policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION,
            userAgentConfiguration));
        policies.add(new RequestIdPolicy());
        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));
        policies.add(new AddDatePolicy());
        HttpHeaders headers = new HttpHeaders();
        clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue()));
        if (headers.getSize() > 0) {
            policies.add(new AddHeadersPolicy(headers));
        }
        policies.add(new MetadataValidationPolicy());
        // Authentication: exactly one of the four credential kinds (validated above).
        if (storageSharedKeyCredential != null) {
            policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
        } else if (tokenCredential != null) {
            // Bearer tokens over plain HTTP would leak credentials; enforce HTTPS.
            BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
            policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
        } else if (azureSasCredential != null) {
            policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
        } else if (sasToken != null) {
            policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
        }
        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);
        policies.add(new ResponseValidationPolicyBuilder()
            .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
            .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
            .build());
        policies.add(new HttpLoggingPolicy(logOptions));
        policies.add(new ScrubEtagPolicy());
        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /**
     * Sets the encryption key parameters for the client.
     *
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the
     * content encryption key.
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the key resolver used to select the correct key for decrypting existing blobs.
     *
     * @param keyResolver The key resolver.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    // Enforces the key-material contract: at least one of key/resolver, and a wrap algorithm
    // whenever a key is present.
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
     * Clears any previously set token credential or SAS token.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service
     * (converted internally to a shared key credential).
     *
     * @param credential {@link AzureNamedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Clears any
     * previously set shared key credential or SAS token.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service. Clears any previously
     * set shared key or token credential.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be
     * the query parameters (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears the credential used to authorize the request, for blobs that are publicly accessible.
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service, deriving endpoint, account name and
     * credential (shared key or SAS) from it.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER
                .logExceptionAsError(new IllegalArgumentException(
                    "connectionString missing required settings to derive blob service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parsing it for account, container and blob names,
     * snapshot, version id and any embedded SAS token.
     * <p>
     * If the blob name contains special characters, pass in the url encoded version of the blob
     * name. For blobs in the root container, set the endpoint to the account url and specify the
     * blob name separately, since a single path element is interpreted as the container name.
     *
     * @param endpoint URL of the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
     */
    @Override
    public EncryptedBlobClientBuilder endpoint(String endpoint) {
        try {
            URL url = new URL(endpoint);
            BlobUrlParts parts = BlobUrlParts.parse(url);
            this.accountName = parts.getAccountName();
            this.endpoint = BuilderHelper.getEndpoint(parts);
            // Only overwrite container/blob if the URL actually carried them.
            this.containerName
                = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
            this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName());
            this.snapshot = parts.getSnapshot();
            this.versionId = parts.getVersionId();
            String sasToken = parts.getCommonSasQueryParameters().encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the name of the container that contains the blob.
     *
     * @param containerName Name of the container. If the value {@code null} or empty the root
     * container, {@code $root}, will be used.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob. The name is decode-then-encoded so both raw and pre-encoded
     * names are accepted.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the
     * url encoded version of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
            "'blobName' cannot be null.")));
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the
     * latest blob version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the
     * service. Ignored if a full {@link HttpPipeline} is set.
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent, placed
     * per-call or per-retry according to the policy's declared position. Ignored if a full
     * {@link HttpPipeline} is set.
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving
     * requests. Ignored if a full {@link HttpPipeline} is set.
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
     */
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during
     * building of the client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the request retry options for all the requests made through the client. Mutually
     * exclusive with {@code retryOptions(RetryOptions)}.
     *
     * @param retryOptions {@link RequestRetryOptions}.
     * @return the updated EncryptedBlobClientBuilder object.
     */
    public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link RetryOptions} for all the requests made through the client. Mutually
     * exclusive with {@code retryOptions(RequestRetryOptions)}; ignored if a full
     * {@link HttpPipeline} is set.
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the
     * client.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
        this.coreRetryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link HttpPipeline} to use for the service client. When set, it takes precedence
     * over the individually configured HTTP settings (client, policies, retry, logging); only the
     * endpoint/blob-identity and encryption settings remain in effect.
     *
     * @param httpPipeline The pipeline to use, or {@code null} to clear a previously set one.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
        if (this.httpPipeline != null && httpPipeline == null) {
            LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.httpPipeline = httpPipeline;
        return this;
    }

    /**
     * Allows for setting common properties such as application ID, headers, proxy configuration,
     * etc. Prefer passing an {@link HttpClientOptions} instance. Ignored if a full
     * {@link HttpPipeline} is set.
     *
     * @param clientOptions A configured instance of {@link HttpClientOptions}.
     * @see HttpClientOptions
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code clientOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
        this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link BlobServiceVersion} that is used when making API requests. If not provided,
     * the latest service version known to this library is used; targeting a specific version may
     * cause the service to return errors for newer APIs.
     *
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
        this.version = version;
        return this;
    }

    /**
     * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob
     * contents on the server.
     *
     * @param customerProvidedKey {@link CustomerProvidedKey}
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
        if (customerProvidedKey == null) {
            this.customerProvidedKey = null;
        } else {
            this.customerProvidedKey = new CpkInfo()
                .setEncryptionKey(customerProvidedKey.getKey())
                .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
                .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
        }
        return this;
    }

    /**
     * Sets the {@code encryption scope} that is used to encrypt blob contents on the server.
     *
     * @param encryptionScope Encryption scope containing the encryption key information.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
        if (encryptionScope == null) {
            this.encryptionScope = null;
        } else {
            this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
        }
        return this;
    }

    /**
     * Configures the builder from an existing {@link BlobClient}: copies its pipeline, URL and
     * service version. The pipeline must not already be configured for encryption/decryption, and
     * for security reasons CPK/encryption-scope settings are not copied.
     *
     * @param blobClient BlobClient used to configure the builder.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobClient} is {@code null}.
     */
    public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
        Objects.requireNonNull(blobClient);
        return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
    }

    /**
     * Configures the builder from an existing {@link BlobAsyncClient}: copies its pipeline, URL
     * and service version. The pipeline must not already be configured for encryption/decryption,
     * and for security reasons CPK/encryption-scope settings are not copied.
     *
     * @param blobAsyncClient BlobAsyncClient used to configure the builder.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
     */
    public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
        Objects.requireNonNull(blobAsyncClient);
        return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
            blobAsyncClient.getServiceVersion());
    }

    /**
     * Helper method to transform a regular client into an encrypted client.
     *
     * @param httpPipeline {@link HttpPipeline}
     * @param endpoint The endpoint.
     * @param version {@link BlobServiceVersion} of the service to be used when making requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
        this.endpoint(endpoint);
        this.serviceVersion(version);
        return this.pipeline(httpPipeline);
    }

    /**
     * Sets the requires encryption option.
     *
     * @param requiresEncryption Whether encryption is enforced by this client. Client will throw
     * if data is downloaded and it is not encrypted.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
        this.requiresEncryption = requiresEncryption;
        return this;
    }
}
Why is this message logged at `info` level? The sibling message in the other branch of this method uses `debug` — should this one be `debug` as well, for consistency?
public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } }
logger.info("Setting PartitionKeyAccessor...");
public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
Before there was no race condition (for the inner part of the if-block) - the line would have been logged exactly once. Now when multiple threads try to get the accessor concurrently, multiple of them would log (until the accessor has actually been initialized by a single thread). I changed the log level because we might see multiple of these logs - up to the number of cores. I am ok with still using info if you think it is needed (I think the two info logs should be sufficient - but given that we have already spent 3 PRs on this, I am open to changing it to info)
public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; }
logger.debug("Initializing PartitionKeyAccessor...");
public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
Yes - there is a race. Multiple threads might call setxxxAccessor concurrently - only one thread will "win" (enter the else statement) - this "winner" will have set the accessor and also the xxxClassLoaded flag. So threads calling into getxxxAccessor will only skip calling into xxx.initialize (which calls setxxxAccessor) once we are sure the accessor has been set. So, yes - there is a race - and yes - it is thread-safe now.
public static void setCosmosClientBuilderAccessor(final CosmosClientBuilderAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("CosmosClientBuilderAccessor already initialized!"); } else { logger.info("Setting CosmosClientBuilderAccessor..."); cosmosClientBuilderClassLoaded.set(true); } }
cosmosClientBuilderClassLoaded.set(true);
public static void setCosmosClientBuilderAccessor(final CosmosClientBuilderAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("CosmosClientBuilderAccessor already initialized!"); } else { logger.info("Setting CosmosClientBuilderAccessor..."); cosmosClientBuilderClassLoaded.set(true); } }
class CosmosClientBuilderHelper { private static final AtomicReference<CosmosClientBuilderAccessor> accessor = new AtomicReference<>(); private static final AtomicBoolean cosmosClientBuilderClassLoaded = new AtomicBoolean(false); private CosmosClientBuilderHelper() {} public static CosmosClientBuilderAccessor getCosmosClientBuilderAccessor() { if (!cosmosClientBuilderClassLoaded.get()) { logger.debug("Initializing CosmosClientBuilderAccessor..."); CosmosClientBuilder.doNothingButEnsureLoadingClass(); } CosmosClientBuilderAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("CosmosClientBuilderAccessor is not initialized yet!"); System.exit(9700); } return snapshot; } public interface CosmosClientBuilderAccessor { void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder, CosmosClientMetadataCachesSnapshot metadataCache); CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder); void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType); ApiType getCosmosClientApiType(CosmosClientBuilder builder); ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder); Configs getConfigs(CosmosClientBuilder builder); ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder); } }
class CosmosClientBuilderHelper { private static final AtomicReference<CosmosClientBuilderAccessor> accessor = new AtomicReference<>(); private static final AtomicBoolean cosmosClientBuilderClassLoaded = new AtomicBoolean(false); private CosmosClientBuilderHelper() {} public static CosmosClientBuilderAccessor getCosmosClientBuilderAccessor() { if (!cosmosClientBuilderClassLoaded.get()) { logger.debug("Initializing CosmosClientBuilderAccessor..."); CosmosClientBuilder.doNothingButEnsureLoadingClass(); } CosmosClientBuilderAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("CosmosClientBuilderAccessor is not initialized yet!"); System.exit(9700); } return snapshot; } public interface CosmosClientBuilderAccessor { void setCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder, CosmosClientMetadataCachesSnapshot metadataCache); CosmosClientMetadataCachesSnapshot getCosmosClientMetadataCachesSnapshot(CosmosClientBuilder builder); void setCosmosClientApiType(CosmosClientBuilder builder, ApiType apiType); ApiType getCosmosClientApiType(CosmosClientBuilder builder); ConnectionPolicy getConnectionPolicy(CosmosClientBuilder builder); Configs getConfigs(CosmosClientBuilder builder); ConsistencyLevel getConsistencyLevel(CosmosClientBuilder builder); } }
This will be logged exactly once - by the "winning" thread that actually set the accessor AtomicReference. IMO having it as info makes sense because it can be used to tell from the logs whether initialization has happened or not.
public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } }
logger.info("Setting PartitionKeyAccessor...");
public static void setPartitionKeyAccessor(final PartitionKeyAccessor newAccessor) { if (!accessor.compareAndSet(null, newAccessor)) { logger.debug("PartitionKeyAccessor already initialized!"); } else { logger.info("Setting PartitionKeyAccessor..."); partitionKeyClassLoaded.set(true); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
class PartitionKeyHelper { private final static AtomicBoolean partitionKeyClassLoaded = new AtomicBoolean(false); private final static AtomicReference<PartitionKeyAccessor> accessor = new AtomicReference<>(); private PartitionKeyHelper() {} public static PartitionKeyAccessor getPartitionKeyAccessor() { if (!partitionKeyClassLoaded.get()) { logger.debug("Initializing PartitionKeyAccessor..."); PartitionKey.doNothingButEnsureLoadingClass(); } PartitionKeyAccessor snapshot = accessor.get(); if (snapshot == null) { logger.error("PartitionKeyAccessor is not initialized yet!"); System.exit(9701); } return snapshot; } public interface PartitionKeyAccessor { PartitionKey toPartitionKey(PartitionKeyInternal partitionKeyInternal); } }
Do we want to resolve this default before the warning check above?
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using this builder"); } this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion;
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() { Objects.requireNonNull(blobName, "'blobName' cannot be null."); checkValidEncryptionParameters(); this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion; if (EncryptionVersion.V1.equals(this.encryptionVersion)) { LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer " + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended " + "the version be set to v2 using the constructor"); } /* Implicit and explicit root container access are functionally equivalent, but explicit references are easier to read and debug. */ if (CoreUtils.isNullOrEmpty(containerName)) { containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME; } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint, serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope, keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion); }
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>, ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>, AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>, ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> { private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class); private static final Map<String, String> PROPERTIES = CoreUtils.getProperties("azure-storage-blob-cryptography.properties"); private static final String SDK_NAME = "name"; private static final String SDK_VERSION = "version"; private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName"); private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion"); private String endpoint; private String accountName; private String containerName; private String blobName; private String snapshot; private String versionId; private boolean requiresEncryption; private EncryptionVersion encryptionVersion; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private AzureSasCredential azureSasCredential; private String sasToken; private HttpClient httpClient; private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>(); private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions; private RetryOptions coreRetryOptions; private HttpPipeline httpPipeline; private ClientOptions clientOptions = new ClientOptions(); private Configuration configuration; private 
AsyncKeyEncryptionKey keyWrapper; private AsyncKeyEncryptionKeyResolver keyResolver; private String keyWrapAlgorithm; private BlobServiceVersion version; private CpkInfo customerProvidedKey; private EncryptionScope encryptionScope; /** * Creates a new instance of the EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * Creates a {@link EncryptedBlobClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * <pre> * EncryptedBlobAsyncClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobAsyncClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobAsyncClient --> * * @return a {@link EncryptedBlobClient} created from the configurations in this builder. * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ public EncryptedBlobClient buildEncryptedBlobClient() { return new EncryptedBlobClient(buildEncryptedBlobAsyncClient()); } /** * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder. * * <p><strong>Code Samples</strong></p> * * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * <pre> * EncryptedBlobClient client = new EncryptedBlobClientBuilder& * .key& * .keyResolver& * .connectionString& * .buildEncryptedBlobClient& * </pre> * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient --> * * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder. 
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private BlobAsyncClient getUnencryptedBlobClient() { BlobClientBuilder builder = new BlobClientBuilder() .endpoint(endpoint) .containerName(containerName) .blobName(blobName) .snapshot(snapshot) .customerProvidedKey( customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey())) .encryptionScope(encryptionScope == null ? 
null : encryptionScope.getEncryptionScope()) .versionId(versionId) .serviceVersion(version) .httpClient(httpClient); if (storageSharedKeyCredential != null) { builder.credential(storageSharedKeyCredential); } else if (tokenCredential != null) { builder.credential(tokenCredential); } else if (azureSasCredential != null) { builder.credential(azureSasCredential); } else if (sasToken != null) { builder.credential(new AzureSasCredential(sasToken)); } return builder.buildAsyncClient(); } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy || currPolicy instanceof FetchEncryptionVersionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); policies.add(0, new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId() : logOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() 
.policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. 
* @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) { Objects.requireNonNull(credential, "'credential' cannot be null."); return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential)); } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for Java * <a href="https: * documentation for more details on proper usage of the {@link TokenCredential} type. * * @param credential {@link TokenCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ @Override public EncryptedBlobClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasToken = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. This string should only be the query parameters * (with or without a leading '?') and not a full url. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public EncryptedBlobClientBuilder sasToken(String sasToken) { this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null."); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the {@link AzureSasCredential} used to authorize requests sent to the service. * * @param credential {@link AzureSasCredential} used to authorize requests sent to the service. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. 
*/ @Override public EncryptedBlobClientBuilder credential(AzureSasCredential credential) { this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); return this; } /** * Clears the credential used to authorize the request. * * <p>This is for blobs that are publicly accessible.</p> * * @return the updated EncryptedBlobClientBuilder */ public EncryptedBlobClientBuilder setAnonymousAccess() { this.storageSharedKeyCredential = null; this.tokenCredential = null; this.azureSasCredential = null; this.sasToken = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated EncryptedBlobClientBuilder * @throws IllegalArgumentException If {@code connectionString} is invalid. */ @Override public EncryptedBlobClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw LOGGER .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the service endpoint, additionally parses it for information (SAS token, 
container name, blob name) * * <p>If the blob name contains special characters, pass in the url encoded version of the blob name. </p> * * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob name * as the container name. With only one path element, it is impossible to distinguish between a container name and a * blob in the root container, so it is assumed to be the container name as this is much more common. When working * with blobs in the root container, it is best to set the endpoint to the account url and specify the blob name * separately using the {@link EncryptedBlobClientBuilder * * @param endpoint URL of the service * @return the updated EncryptedBlobClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. */ @Override public EncryptedBlobClientBuilder endpoint(String endpoint) { try { URL url = new URL(endpoint); BlobUrlParts parts = BlobUrlParts.parse(url); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName(); this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName()); this.snapshot = parts.getSnapshot(); this.versionId = parts.getVersionId(); String sasToken = parts.getCommonSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex)); } return this; } /** * Sets the name of the container that contains the blob. * * @param containerName Name of the container. If the value {@code null} or empty the root container, {@code $root}, * will be used. 
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
    this.containerName = containerName;
    return this;
}

/**
 * Sets the name of the blob.
 *
 * <p>The name is URL-decoded and then re-encoded, so either the raw or the url encoded form may be passed.</p>
 *
 * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded version
 * of the blob name.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
    this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
        "'blobName' cannot be null.")));
    return this;
}

/**
 * Sets the snapshot identifier of the blob.
 *
 * @param snapshot Snapshot identifier for the blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder snapshot(String snapshot) {
    this.snapshot = snapshot;
    return this;
}

/**
 * Sets the version identifier of the blob.
 *
 * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder versionId(String versionId) {
    this.versionId = versionId;
    return this;
}

/**
 * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
 *
 * <p><strong>Note:</strong> if a {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}, it takes
 * precedence and this setting is ignored.</p>
 *
 * @param httpClient The {@link HttpClient} to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
    // Warn on an explicit reset to null; previously-configured callers may not intend this.
    if (this.httpClient != null && httpClient == null) {
        LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
    }
    this.httpClient = httpClient;
    return this;
}

/**
 * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
 *
 * <p>Per-call policies run once per logical request; all other policies run on every retry. Ignored if a full
 * {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}.</p>
 *
 * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
    if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
        perCallPolicies.add(pipelinePolicy);
    } else {
        perRetryPolicies.add(pipelinePolicy);
    }
    return this;
}

/**
 * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
 * the service. Ignored if a full {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}.
 *
 * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests to
 * and from the service.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}

/**
 * Gets the default Storage allowlist log headers and query parameters.
 *
 * @return the default http log options.
 */
public static HttpLogOptions getDefaultHttpLogOptions() {
    return BuilderHelper.getDefaultHttpLogOptions();
}

/**
 * Sets the configuration object used to retrieve environment configuration values during building of the client.
 *
 * @param configuration Configuration store used to retrieve environment configurations.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder configuration(Configuration configuration) {
    this.configuration = configuration;
    return this;
}

/**
 * Sets the request retry options for all the requests made through the client.
 *
 * <p>Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.</p>
 *
 * @param retryOptions {@link RequestRetryOptions}.
 * @return the updated EncryptedBlobClientBuilder object.
 */
public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
    this.retryOptions = retryOptions;
    return this;
}

/**
 * Sets the {@link RetryOptions} for all the requests made through the client.
 *
 * <p>Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}. Ignored if a full
 * {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}.</p>
 *
 * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) {
    this.coreRetryOptions = retryOptions;
    return this;
}

/**
 * Sets the {@link HttpPipeline} to use for the service client.
 *
 * <p>If set, this pipeline takes precedence over the other HttpTrait settings (http client, log options,
 * per-call/per-retry policies, retry options); endpoint and service version are still honored.</p>
 *
 * @param httpPipeline The {@link HttpPipeline} to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) {
    // Warn on an explicit reset to null; previously-configured callers may not intend this.
    if (this.httpPipeline != null && httpPipeline == null) {
        LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured.");
    }
    this.httpPipeline = httpPipeline;
    return this;
}

/**
 * Allows for setting common properties such as application ID, headers, proxy configuration, etc. It is
 * recommended to pass an instance of {@link HttpClientOptions} (a subclass of {@link ClientOptions}) as it exposes
 * additional HTTP-specific configuration. Ignored if a full {@link HttpPipeline} is set via
 * {@link #pipeline(HttpPipeline)}.
 *
 * @param clientOptions A configured instance of {@link HttpClientOptions}.
 * @see HttpClientOptions
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code clientOptions} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) {
    this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null.");
    return this;
}

/**
 * Sets the {@link BlobServiceVersion} that is used when making API requests.
 * <p>
 * If a service version is not provided, the latest service version known to this client library is used; upgrading
 * the library may therefore move to a newer service version. Targeting a specific service version may also mean
 * that the service will return an error for newer APIs.
 *
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) {
    this.version = version;
    return this;
}

/**
 * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server.
 *
 * @param customerProvidedKey {@link CustomerProvidedKey}; pass {@code null} to clear a previously set key.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) {
    if (customerProvidedKey == null) {
        this.customerProvidedKey = null;
    } else {
        // Translate the public CustomerProvidedKey type into the wire-level CpkInfo headers.
        this.customerProvidedKey = new CpkInfo()
            .setEncryptionKey(customerProvidedKey.getKey())
            .setEncryptionKeySha256(customerProvidedKey.getKeySha256())
            .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm());
    }
    return this;
}

/**
 * Sets the {@code encryption scope} that is used to encrypt blob contents on the server.
 *
 * @param encryptionScope Encryption scope containing the encryption key information; pass {@code null} to clear.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) {
    if (encryptionScope == null) {
        this.encryptionScope = null;
    } else {
        this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope);
    }
    return this;
}

/**
 * Configures the builder based on the passed {@link BlobClient}, copying its {@link HttpPipeline}, URL and
 * {@link BlobServiceVersion}. The underlying pipeline must not already be configured for encryption/decryption.
 *
 * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
 * encryption scope properties from the provided client; set them explicitly via
 * {@link #customerProvidedKey(CustomerProvidedKey)} and {@link #encryptionScope(String)} if needed.</p>
 *
 * @param blobClient BlobClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) {
    Objects.requireNonNull(blobClient);
    return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion());
}

/**
 * Configures the builder based on the passed {@link BlobAsyncClient}, copying its {@link HttpPipeline}, URL and
 * {@link BlobServiceVersion}. The underlying pipeline must not already be configured for encryption/decryption.
 *
 * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and
 * encryption scope properties from the provided client; set them explicitly via
 * {@link #customerProvidedKey(CustomerProvidedKey)} and {@link #encryptionScope(String)} if needed.</p>
 *
 * @param blobAsyncClient BlobAsyncClient used to configure the builder.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobAsyncClient} is {@code null}.
 */
public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) {
    Objects.requireNonNull(blobAsyncClient);
    return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(),
        blobAsyncClient.getServiceVersion());
}

/**
 * Helper that applies an existing client's pipeline, endpoint and service version to this builder.
 *
 * @param httpPipeline {@link HttpPipeline}
 * @param endpoint The endpoint.
 * @param version {@link BlobServiceVersion} of the service to be used when making requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) {
    this.endpoint(endpoint);
    this.serviceVersion(version);
    return this.pipeline(httpPipeline);
}

/**
 * Sets the requires encryption option.
 *
 * @param requiresEncryption Whether encryption is enforced by this client. The client will throw if data is
 * downloaded and it is not encrypted.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) {
    this.requiresEncryption = requiresEncryption;
    return this;
}

/**
 * Sets the encryption version for this client. For any new workloads, using version 2 or above is highly
 * encouraged as version 1 uses AES/CBC, which is no longer considered secure. For compatibility reasons, the
 * default value is version 1.
 *
 * @param version The encryption version.
 * @return The updated builder.
 */
public EncryptedBlobClientBuilder encryptionVersion(EncryptionVersion version) {
    this.encryptionVersion = version;
    return this;
}
}
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> {

    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);
    private static final Map<String, String> PROPERTIES
        = CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    // Name/version of this cryptography library, used for the blob user-agent modification policy.
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Name/version of the underlying blob library, used when building the pipeline's UserAgentPolicy.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION,
        "UnknownVersion");

    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;
    private boolean requiresEncryption;
    private EncryptionVersion encryptionVersion;

    // At most one of the following credential sources may be set at build time.
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;

    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;

    // Client-side encryption key material.
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;

    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     *
     * @deprecated Use {@link EncryptedBlobClientBuilder#EncryptedBlobClientBuilder(EncryptionVersion)} so the
     * encryption protocol version is explicit.
     */
    @Deprecated
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder using the given encryption protocol version.
     *
     * @param version The version of the client side encryption protocol to use. It is highly recommended that v2 be
     * preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that even a
     * client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
     * @throws NullPointerException If {@code version} is {@code null}.
     */
    public EncryptedBlobClientBuilder(EncryptionVersion version) {
        Objects.requireNonNull(version);
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = version;
    }

    /**
     * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
     *
     * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
     */
    public EncryptedBlobClient buildEncryptedBlobClient() {
        return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
    }

    // NOTE(review): the body of buildEncryptedBlobAsyncClient() appears to be missing from this extraction —
    // buildEncryptedBlobClient() above calls it, but only its Javadoc survives before the next helper. Confirm the
    // method exists in the authoritative source before relying on this copy.

    /**
     * Rebuilds the given pipeline with a {@link BlobUserAgentModificationPolicy} inserted directly after each
     * {@link UserAgentPolicy}, so requests advertise this cryptography library's name and version.
     *
     * @param pipeline The pipeline to augment.
     * @return a new pipeline with the user-agent modification policy added.
     */
    private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = pipeline.getPolicy(i);
            policies.add(currPolicy);
            if (currPolicy instanceof UserAgentPolicy) {
                policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
            }
        }
        return new HttpPipelineBuilder()
            .httpClient(pipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    /**
     * Builds a plain (non-encrypting) {@link BlobAsyncClient} over the same blob, mirroring this builder's
     * configuration and credential. Used by the decryption policy for range re-downloads.
     *
     * <p>NOTE(review): only {@code retryOptions} is forwarded here, not {@code coreRetryOptions}; confirm that a
     * retry policy configured via {@code retryOptions(RetryOptions)} is intended to be dropped for this inner
     * client.</p>
     */
    private BlobAsyncClient getUnencryptedBlobClient() {
        BlobClientBuilder builder = new BlobClientBuilder()
            .endpoint(endpoint)
            .containerName(containerName)
            .blobName(blobName)
            .snapshot(snapshot)
            .customerProvidedKey(customerProvidedKey == null ? null
                : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey()))
            .encryptionScope(encryptionScope == null ? null : encryptionScope.getEncryptionScope())
            .versionId(versionId)
            .serviceVersion(version)
            .pipeline(this.httpPipeline)
            .httpClient(httpClient)
            .configuration(configuration)
            .retryOptions(this.retryOptions)
            .clientOptions(this.clientOptions);

        // Forward whichever single credential source is configured.
        if (storageSharedKeyCredential != null) {
            builder.credential(storageSharedKeyCredential);
        } else if (tokenCredential != null) {
            builder.credential(tokenCredential);
        } else if (azureSasCredential != null) {
            builder.credential(azureSasCredential);
        } else if (sasToken != null) {
            builder.credential(new AzureSasCredential(sasToken));
        }

        for (HttpPipelinePolicy policy : perCallPolicies) {
            builder.addPolicy(policy);
        }
        for (HttpPipelinePolicy policy : perRetryPolicies) {
            builder.addPolicy(policy);
        }
        return builder.buildAsyncClient();
    }

    /**
     * Produces the pipeline used by the encrypted client. If the user supplied a pipeline, it is reused with a
     * {@link BlobDecryptionPolicy} prepended (and rejected if it already contains one); otherwise a full pipeline is
     * assembled from the builder's settings.
     *
     * @throws IllegalStateException If multiple credentials have been specified.
     * @throws IllegalArgumentException If the supplied pipeline already contains a decryption policy.
     */
    private HttpPipeline getHttpPipeline() {
        CredentialValidator.validateSingleCredentialIsPresent(storageSharedKeyCredential, tokenCredential,
            azureSasCredential, sasToken, LOGGER);

        // Prefer the user-provided pipeline, but never layer decryption twice.
        if (httpPipeline != null) {
            List<HttpPipelinePolicy> policies = new ArrayList<>();
            for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
                HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
                if (currPolicy instanceof BlobDecryptionPolicy) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                        + " configured for encryption/decryption in a way that might conflict with the passed key "
                        + "information. Please ensure that the passed pipeline is not already configured for "
                        + "encryption/decryption"));
                }
                policies.add(currPolicy);
            }
            // Decryption must run first so every response body passes through it.
            policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
                getUnencryptedBlobClient()));
            return new HttpPipelineBuilder()
                .httpClient(httpPipeline.getHttpClient())
                .policies(policies.toArray(new HttpPipelinePolicy[0]))
                .build();
        }

        Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
            getUnencryptedBlobClient()));

        // ClientOptions application ID wins over the one in log options.
        String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId()
            : logOptions.getApplicationId();
        policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION,
            userAgentConfiguration));
        policies.add(new RequestIdPolicy());

        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));
        policies.add(new AddDatePolicy());

        HttpHeaders headers = new HttpHeaders();
        clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue()));
        if (headers.getSize() > 0) {
            policies.add(new AddHeadersPolicy(headers));
        }
        policies.add(new MetadataValidationPolicy());

        // Authentication policy matching whichever single credential source is configured.
        if (storageSharedKeyCredential != null) {
            policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
        } else if (tokenCredential != null) {
            BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
            policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
        } else if (azureSasCredential != null) {
            policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
        } else if (sasToken != null) {
            policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
        }

        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);

        policies.add(new ResponseValidationPolicyBuilder()
            .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
            .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
            .build());
        policies.add(new HttpLoggingPolicy(logOptions));
        policies.add(new ScrubEtagPolicy());

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /**
     * Sets the encryption key parameters for the client.
     *
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption
     * key.
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the encryption parameters for this client.
     *
     * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    /**
     * Validates that a usable encryption configuration is present: at least one of key/resolver, and a wrap
     * algorithm whenever a key is supplied.
     *
     * @throws IllegalArgumentException If the configuration is incomplete.
     */
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. Clears any
     * previously set token credential or SAS token.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. Delegates to the
     * shared-key overload after conversion.
     *
     * @param credential {@link AzureNamedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Clears any previously set
     * shared-key credential or SAS token. Refer to the Azure SDK for Java identity documentation for proper usage of
     * the {@link TokenCredential} type.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service. Clears any previously set shared-key or
     * token credential.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be the query
     * parameters (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears the credential used to authorize the request.
     *
     * <p>This is for blobs that are publicly accessible.</p>
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service, deriving the endpoint, account name and credential
     * (shared key or SAS) from it.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parsing it for information (SAS token, container name, blob name).
     *
     * <p>If the blob name contains special characters, pass in the url encoded version of the blob name.</p>
     *
     * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob
     * name as the container name. With only one path element, it is impossible to distinguish between a container
     * name and a blob in the root container, so it is assumed to be the container name as this is much more common.
     * When working with blobs in the root container, it is best to set the endpoint to the account url and specify
     * the blob name separately using the {@link EncryptedBlobClientBuilder#blobName(String)} method.</p>
     *
     * @param endpoint URL of the service
     * @return the updated EncryptedBlobClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
     */
    @Override
    public EncryptedBlobClientBuilder endpoint(String endpoint) {
        try {
            URL url = new URL(endpoint);
            BlobUrlParts parts = BlobUrlParts.parse(url);

            this.accountName = parts.getAccountName();
            this.endpoint = BuilderHelper.getEndpoint(parts);
            // Only override container/blob names when the URL actually carries them.
            this.containerName = parts.getBlobContainerName() == null
                ? this.containerName : parts.getBlobContainerName();
            this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName());
            this.snapshot = parts.getSnapshot();
            this.versionId = parts.getVersionId();

            String sasToken = parts.getCommonSasQueryParameters().encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the name of the container that contains the blob.
     *
     * @param containerName Name of the container. If the value {@code null} or empty the root container,
     * {@code $root}, will be used.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded
     * version of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
            "'blobName' cannot be null.")));
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
     *
     * <p><strong>Note:</strong> if a {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}, it takes
     * precedence and this setting is ignored.</p>
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent. Per-call policies run once
     * per logical request; all other policies run on every retry. Ignored if a full {@link HttpPipeline} is set via
     * {@link #pipeline(HttpPipeline)}.
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
     * the service. Ignored if a full {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}.
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
     * to and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
     */
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during building of the
     * client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the request retry options for all the requests made through the client.
     *
     * <p>Setting this is mutually exclusive with using {@link #retryOptions(RetryOptions)}.</p>
     *
     * @param retryOptions {@link RequestRetryOptions}.
     * @return the updated EncryptedBlobClientBuilder object.
     */
    public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = retryOptions;
        return this;
    }

    /**
     * Sets the {@link RetryOptions} for all the requests made through the client.
     *
     * <p>Setting this is mutually exclusive with using {@link #retryOptions(RequestRetryOptions)}. Ignored if a
     * full {@link HttpPipeline} is set via {@link #pipeline(HttpPipeline)}.</p>
     *
     * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client.
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } }
This was typed a while ago in a half-finished review. We have since had discussions. Marking resolved.
/**
 * Creates an {@code EncryptedBlobAsyncClient} based on options set in this builder.
 *
 * <p>Bug fix: the encryption version is now defaulted to {@link EncryptionVersion#V1} BEFORE the v1 check.
 * Previously the check ran first, so a builder that never set a version (and therefore silently defaulted to
 * v1) never emitted the deprecation warning — the exact case the warning is meant to catch.</p>
 *
 * @return an {@code EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code blobName} is {@code null}.
 * @throws IllegalArgumentException If the key/key-resolver configuration is invalid
 * (see {@code checkValidEncryptionParameters}).
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();

    // Default first so the warning below also fires for builders that never set a version explicitly.
    this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion;
    if (EncryptionVersion.V1.equals(this.encryptionVersion)) {
        LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer "
            + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended "
            + "the version be set to v2 using this builder");
    }

    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }

    // Fall back to the latest known service version when none was requested.
    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();

    return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
        serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
        keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion);
}
+ "the version be set to v2 using this builder");
/**
 * Creates an {@code EncryptedBlobAsyncClient} based on options set in this builder.
 *
 * @return an {@code EncryptedBlobAsyncClient} created from the configurations in this builder.
 * @throws NullPointerException If {@code blobName} is {@code null}.
 * @throws IllegalArgumentException If the key/key-resolver configuration is invalid
 * (see {@code checkValidEncryptionParameters}).
 */
public EncryptedBlobAsyncClient buildEncryptedBlobAsyncClient() {
    Objects.requireNonNull(blobName, "'blobName' cannot be null.");
    checkValidEncryptionParameters();

    // Default the version BEFORE the check so the implicit (null -> V1) default also triggers the warning.
    this.encryptionVersion = encryptionVersion == null ? EncryptionVersion.V1 : encryptionVersion;
    if (EncryptionVersion.V1.equals(this.encryptionVersion)) {
        // NOTE(review): the message says "using the constructor", but the version is set via this builder —
        // confirm the intended wording (the sibling copy of this method says "using this builder").
        LOGGER.warning("Client is being configured to use v1 of client side encryption, which is no longer "
            + "considered secure. The default is v1 for compatibility reasons, but it is highly recommended "
            + "the version be set to v2 using the constructor");
    }

    /*
    Implicit and explicit root container access are functionally equivalent, but explicit references are easier
    to read and debug.
    */
    if (CoreUtils.isNullOrEmpty(containerName)) {
        containerName = BlobContainerAsyncClient.ROOT_CONTAINER_NAME;
    }

    // Fall back to the latest known service version when none was requested.
    BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest();

    return new EncryptedBlobAsyncClient(addBlobUserAgentModificationPolicy(getHttpPipeline()), endpoint,
        serviceVersion, accountName, containerName, blobName, snapshot, customerProvidedKey, encryptionScope,
        keyWrapper, keyWrapAlgorithm, versionId, encryptionVersion);
}
/**
 * Fluent builder for encrypted blob clients, i.e. blob clients that encrypt content on upload and decrypt it on
 * download using a client-held key ({@code AsyncKeyEncryptionKey}) and/or a key resolver.
 */
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> {
    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);

    // SDK name/version read from this artifact's properties file; used in the user-agent policy below.
    private static final Map<String, String> PROPERTIES =
        CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // USER_AGENT_PROPERTIES is not declared in this chunk — presumably a static import from the base blob
    // package (TODO confirm); these identify the underlying blob client in the user agent.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION =
        USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");

    // Blob identity: parsed from endpoint(String) or set individually via the dedicated setters.
    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;

    // When true, downloads of blobs that are not encrypted fail instead of returning plaintext.
    private boolean requiresEncryption;
    private EncryptionVersion encryptionVersion;

    // Credential options — mutually exclusive; at most one may be set (validated when building the pipeline).
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;

    // HttpTrait configuration; ignored when an explicit httpPipeline is supplied.
    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;

    // Client-side encryption key material: a wrapping key plus its wrap algorithm, and/or a resolver used to
    // locate the right key when decrypting existing blobs.
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;

    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     */
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
     *
     * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is
     * {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
     */
    public EncryptedBlobClient buildEncryptedBlobClient() {
        // The sync client is a thin wrapper over the async client.
        return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
    }

    /**
     * Creates a {@link EncryptedBlobAsyncClient} based on options set in the Builder.
     *
     * @return a {@link EncryptedBlobAsyncClient} created from the configurations in this builder.
* @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is {@code null}. * @throws IllegalStateException If multiple credentials have been specified. * @throws IllegalStateException If both {@link * and {@link */ private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) { List<HttpPipelinePolicy> policies = new ArrayList<>(); for (int i = 0; i < pipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = pipeline.getPolicy(i); policies.add(currPolicy); if (currPolicy instanceof UserAgentPolicy) { policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION)); } } return new HttpPipelineBuilder() .httpClient(pipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } private BlobAsyncClient getUnencryptedBlobClient() { BlobClientBuilder builder = new BlobClientBuilder() .endpoint(endpoint) .containerName(containerName) .blobName(blobName) .snapshot(snapshot) .customerProvidedKey( customerProvidedKey == null ? null : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey())) .encryptionScope(encryptionScope == null ? 
null : encryptionScope.getEncryptionScope()) .versionId(versionId) .serviceVersion(version) .httpClient(httpClient); if (storageSharedKeyCredential != null) { builder.credential(storageSharedKeyCredential); } else if (tokenCredential != null) { builder.credential(tokenCredential); } else if (azureSasCredential != null) { builder.credential(azureSasCredential); } else if (sasToken != null) { builder.credential(new AzureSasCredential(sasToken)); } return builder.buildAsyncClient(); } private HttpPipeline getHttpPipeline() { CredentialValidator.validateSingleCredentialIsPresent( storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER); if (httpPipeline != null) { List<HttpPipelinePolicy> policies = new ArrayList<>(); boolean decryptionPolicyPresent = false; for (int i = 0; i < httpPipeline.getPolicyCount(); i++) { HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i); if (currPolicy instanceof BlobDecryptionPolicy || currPolicy instanceof FetchEncryptionVersionPolicy) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already" + " configured for encryption/decryption in a way that might conflict with the passed key " + "information. Please ensure that the passed pipeline is not already configured for " + "encryption/decryption")); } policies.add(currPolicy); } policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); policies.add(0, new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); return new HttpPipelineBuilder() .httpClient(httpPipeline.getHttpClient()) .policies(policies.toArray(new HttpPipelinePolicy[0])) .build(); } Configuration userAgentConfiguration = (configuration == null) ? 
Configuration.NONE : configuration; List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new FetchEncryptionVersionPolicy(getUnencryptedBlobClient(), requiresEncryption)); policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption)); String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId() : logOptions.getApplicationId(); policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION, userAgentConfiguration)); policies.add(new RequestIdPolicy()); policies.addAll(perCallPolicies); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER)); policies.add(new AddDatePolicy()); HttpHeaders headers = new HttpHeaders(); clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue())); if (headers.getSize() > 0) { policies.add(new AddHeadersPolicy(headers)); } policies.add(new MetadataValidationPolicy()); if (storageSharedKeyCredential != null) { policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential)); } else if (tokenCredential != null) { BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER); policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE)); } else if (azureSasCredential != null) { policies.add(new AzureSasCredentialPolicy(azureSasCredential, false)); } else if (sasToken != null) { policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false)); } policies.addAll(perRetryPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() 
.policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Sets the encryption key parameters for the client * * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content encryption key * @param keyWrapAlgorithm The {@link String} used to wrap the key. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) { this.keyWrapper = key; this.keyWrapAlgorithm = keyWrapAlgorithm; return this; } /** * Sets the encryption parameters for this client * * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) { this.keyResolver = keyResolver; return this; } private void checkValidEncryptionParameters() { if (this.keyWrapper == null && this.keyResolver == null) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null")); } if (this.keyWrapper != null && this.keyWrapAlgorithm == null) { throw LOGGER.logExceptionAsError( new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key.")); } } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link StorageSharedKeyCredential}. * @return the updated EncryptedBlobClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasToken = null; return this; } /** * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service. * * @param credential {@link AzureNamedKeyCredential}. 
* @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
    Objects.requireNonNull(credential, "'credential' cannot be null.");
    // Named-key credentials are adapted to the storage shared-key type and routed through that overload.
    return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
}

/**
 * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for
 * Java identity documentation for more details on proper usage of the {@link TokenCredential} type.
 * Clears any previously set shared-key credential or SAS token.
 *
 * @param credential {@link TokenCredential} used to authorize requests sent to the service.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(TokenCredential credential) {
    this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    this.storageSharedKeyCredential = null;
    this.sasToken = null;
    return this;
}

/**
 * Sets the SAS token used to authorize requests sent to the service. Clears any previously set shared-key or
 * token credential.
 *
 * @param sasToken The SAS token to use for authenticating requests. This string should only be the query
 * parameters (with or without a leading '?') and not a full url.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code sasToken} is {@code null}.
 */
public EncryptedBlobClientBuilder sasToken(String sasToken) {
    this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    return this;
}

/**
 * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
 *
 * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
 * @return the updated EncryptedBlobClientBuilder
 * @throws NullPointerException If {@code credential} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
    this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
    return this;
}

/**
 * Clears the credential used to authorize the request.
 *
 * <p>This is for blobs that are publicly accessible.</p>
 *
 * @return the updated EncryptedBlobClientBuilder
 */
public EncryptedBlobClientBuilder setAnonymousAccess() {
    this.storageSharedKeyCredential = null;
    this.tokenCredential = null;
    this.azureSasCredential = null;
    this.sasToken = null;
    return this;
}

/**
 * Sets the connection string to connect to the service. Derives the blob endpoint and account name, and applies
 * whichever auth mechanism the connection string carries (account key or SAS token).
 *
 * @param connectionString Connection string of the storage account.
 * @return the updated EncryptedBlobClientBuilder
 * @throws IllegalArgumentException If {@code connectionString} is invalid.
 */
@Override
public EncryptedBlobClientBuilder connectionString(String connectionString) {
    StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
    StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
    if (endpoint == null || endpoint.getPrimaryUri() == null) {
        throw LOGGER
            .logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
    }
    this.endpoint(endpoint.getPrimaryUri());
    if (storageConnectionString.getAccountName() != null) {
        this.accountName = storageConnectionString.getAccountName();
    }
    StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
    if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
        this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
            authSettings.getAccount().getAccessKey()));
    } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
        this.sasToken(authSettings.getSasToken());
    }
    return this;
}

/**
 * Sets the service endpoint, additionally parses it for information (SAS token, container name, blob name).
 *
 * <p>If the blob name contains special characters, pass in the url encoded version of the blob name.</p>
 *
 * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob
 * name as the container name. With only one path element, it is impossible to distinguish between a container
 * name and a blob in the root container, so it is assumed to be the container name as this is much more common.
 * When working with blobs in the root container, it is best to set the endpoint to the account url and specify
 * the blob name separately using {@link EncryptedBlobClientBuilder#blobName(String)}.</p>
 *
 * @param endpoint URL of the service
 * @return the updated EncryptedBlobClientBuilder object
 * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
 */
@Override
public EncryptedBlobClientBuilder endpoint(String endpoint) {
    try {
        URL url = new URL(endpoint);
        BlobUrlParts parts = BlobUrlParts.parse(url);

        this.accountName = parts.getAccountName();
        this.endpoint = BuilderHelper.getEndpoint(parts);
        // Only overwrite container/blob names when the URL actually carries them.
        this.containerName =
            parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
        this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName());
        this.snapshot = parts.getSnapshot();
        this.versionId = parts.getVersionId();

        String sasToken = parts.getCommonSasQueryParameters().encode();
        if (!CoreUtils.isNullOrEmpty(sasToken)) {
            this.sasToken(sasToken);
        }
    } catch (MalformedURLException ex) {
        throw LOGGER.logExceptionAsError(
            new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
    }
    return this;
}

/**
 * Sets the name of the container that contains the blob.
 *
 * @param containerName Name of the container. If the value {@code null} or empty the root container,
 * {@code $root}, will be used.
* @return the updated EncryptedBlobClientBuilder object
*/
public EncryptedBlobClientBuilder containerName(String containerName) {
    this.containerName = containerName;
    return this;
}

/**
 * Sets the name of the blob.
 *
 * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded
 * version of the blob name.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code blobName} is {@code null}
 */
public EncryptedBlobClientBuilder blobName(String blobName) {
    // Decode-then-encode normalizes the name so it is stored url-encoded exactly once,
    // whether the caller passed a raw or an already-encoded name.
    this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
        "'blobName' cannot be null.")));
    return this;
}

/**
 * Sets the snapshot identifier of the blob.
 *
 * @param snapshot Snapshot identifier for the blob.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder snapshot(String snapshot) {
    this.snapshot = snapshot;
    return this;
}

/**
 * Sets the version identifier of the blob.
 *
 * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob version.
 * @return the updated EncryptedBlobClientBuilder object
 */
public EncryptedBlobClientBuilder versionId(String versionId) {
    this.versionId = versionId;
    return this;
}

/**
 * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
 *
 * <p>Note: if a full {@link HttpPipeline} is supplied via {@code pipeline(HttpPipeline)}, it takes precedence
 * and this setting is ignored.</p>
 *
 * @param httpClient The {@link HttpClient} to use for requests.
 * @return the updated EncryptedBlobClientBuilder object
 */
@Override
public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
    // Surface (at info level) that a previously configured client is being cleared.
    if (this.httpClient != null && httpClient == null) {
        LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
    }

    this.httpClient = httpClient;
    return this;
}

/**
 * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
 *
 * <p>Ignored if a full {@link HttpPipeline} is supplied via {@code pipeline(HttpPipeline)}.</p>
 *
 * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
    Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
    // The policy's declared position decides whether it runs once per call or on every retry attempt.
    if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
        perCallPolicies.add(pipelinePolicy);
    } else {
        perRetryPolicies.add(pipelinePolicy);
    }
    return this;
}

/**
 * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and from
 * the service.
 *
 * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving requests
 * to and from the service.
 * @return the updated EncryptedBlobClientBuilder object
 * @throws NullPointerException If {@code logOptions} is {@code null}.
 */
@Override
public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
    this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
    return this;
}

/**
 * Gets the default Storage allowlist log headers and query parameters.
 *
 * @return the default http log options.
*/ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. */ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } /** * Sets the encryption version for this client. For any new workloads, using version 2 or above is highly * encouraged as version 1 uses AES/CBC, which is no longer considered secure. For compatibility reasons, the * default value is version 1. * @param version The encryption version. * @return The updated builder. */ public EncryptedBlobClientBuilder encryptionVersion(EncryptionVersion version) { this.encryptionVersion = version; return this; } }
/**
 * Fluent builder for {@link EncryptedBlobClient} and {@link EncryptedBlobAsyncClient}, which perform client-side
 * encryption of blob content. Configure the endpoint/container/blob identity, a credential, and the key-wrapping
 * information ({@link #key(AsyncKeyEncryptionKey, String)} and/or
 * {@link #keyResolver(AsyncKeyEncryptionKeyResolver)}) before building.
 */
class EncryptedBlobClientBuilder implements TokenCredentialTrait<EncryptedBlobClientBuilder>,
    ConnectionStringTrait<EncryptedBlobClientBuilder>, AzureNamedKeyCredentialTrait<EncryptedBlobClientBuilder>,
    AzureSasCredentialTrait<EncryptedBlobClientBuilder>, HttpTrait<EncryptedBlobClientBuilder>,
    ConfigurationTrait<EncryptedBlobClientBuilder>, EndpointTrait<EncryptedBlobClientBuilder> {
    private static final ClientLogger LOGGER = new ClientLogger(EncryptedBlobClientBuilder.class);
    private static final Map<String, String> PROPERTIES =
        CoreUtils.getProperties("azure-storage-blob-cryptography.properties");
    private static final String SDK_NAME = "name";
    private static final String SDK_VERSION = "version";
    // Name/version of this cryptography package, used by BlobUserAgentModificationPolicy.
    private static final String CLIENT_NAME = PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String CLIENT_VERSION = PROPERTIES.getOrDefault(SDK_VERSION, "UnknownVersion");
    // Name/version of the underlying blob package, used for the base user agent string.
    // NOTE(review): USER_AGENT_PROPERTIES is not declared in this file — presumably a static import; verify.
    private static final String BLOB_CLIENT_NAME = USER_AGENT_PROPERTIES.getOrDefault(SDK_NAME, "UnknownName");
    private static final String BLOB_CLIENT_VERSION = USER_AGENT_PROPERTIES.getOrDefault(SDK_VERSION,
        "UnknownVersion");

    // Blob identity, parsed from endpoint()/connectionString() or set individually.
    private String endpoint;
    private String accountName;
    private String containerName;
    private String blobName;
    private String snapshot;
    private String versionId;

    // Client-side encryption configuration.
    private boolean requiresEncryption;
    private EncryptionVersion encryptionVersion;

    // Credentials are mutually exclusive; setters null-out the alternatives.
    private StorageSharedKeyCredential storageSharedKeyCredential;
    private TokenCredential tokenCredential;
    private AzureSasCredential azureSasCredential;
    private String sasToken;

    // HTTP-pipeline building blocks. If httpPipeline is set it takes precedence over the rest.
    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> perCallPolicies = new ArrayList<>();
    private final List<HttpPipelinePolicy> perRetryPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions;
    private RetryOptions coreRetryOptions;
    private HttpPipeline httpPipeline;
    private ClientOptions clientOptions = new ClientOptions();
    private Configuration configuration;

    // Key-wrapping configuration for envelope encryption of the content encryption key.
    private AsyncKeyEncryptionKey keyWrapper;
    private AsyncKeyEncryptionKeyResolver keyResolver;
    private String keyWrapAlgorithm;

    private BlobServiceVersion version;
    private CpkInfo customerProvidedKey;
    private EncryptionScope encryptionScope;

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder.
     *
     * @deprecated Use {@link #EncryptedBlobClientBuilder(EncryptionVersion)} and prefer
     * {@link EncryptionVersion#V2} for security reasons.
     */
    @Deprecated
    public EncryptedBlobClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a new instance of the EncryptedBlobClientBuilder targeting a specific encryption protocol version.
     *
     * @param version The version of the client side encryption protocol to use. It is highly recommended that v2
     * be preferred for security reasons, though v1 continues to be supported for compatibility reasons. Note that
     * even a client configured to encrypt using v2 can decrypt blobs that use the v1 protocol.
     */
    public EncryptedBlobClientBuilder(EncryptionVersion version) {
        Objects.requireNonNull(version);
        logOptions = getDefaultHttpLogOptions();
        this.encryptionVersion = version;
    }

    /**
     * Creates a {@link EncryptedBlobClient} based on options set in the Builder.
     *
     * <p><strong>Code Samples</strong></p>
     *
     * <!-- src_embed com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient -->
     * <pre>
     * EncryptedBlobClient client = new EncryptedBlobClientBuilder&#40;&#41;
     *     .key&#40;key, keyWrapAlgorithm&#41;
     *     .keyResolver&#40;keyResolver&#41;
     *     .connectionString&#40;connectionString&#41;
     *     .buildEncryptedBlobClient&#40;&#41;;
     * </pre>
     * <!-- end com.azure.storage.blob.specialized.cryptography.EncryptedBlobClientBuilder.buildEncryptedBlobClient -->
     *
     * @return a {@link EncryptedBlobClient} created from the configurations in this builder.
     * @throws NullPointerException If {@code endpoint}, {@code containerName}, or {@code blobName} is
     * {@code null}.
     * @throws IllegalStateException If multiple credentials have been specified.
     * @throws IllegalStateException If both {@link #retryOptions(RetryOptions)} and
     * {@link #retryOptions(RequestRetryOptions)} have been set.
     */
    public EncryptedBlobClient buildEncryptedBlobClient() {
        // NOTE(review): buildEncryptedBlobAsyncClient() is called here but its definition does not appear in this
        // copy of the class — it looks like the method body was lost; confirm against the original source.
        return new EncryptedBlobClient(buildEncryptedBlobAsyncClient());
    }

    /**
     * Rebuilds the given pipeline with a {@link BlobUserAgentModificationPolicy} inserted directly after each
     * {@link UserAgentPolicy}, so requests advertise this cryptography package in the user agent string.
     *
     * @param pipeline The pipeline to copy and augment.
     * @return a new {@link HttpPipeline} with the same client and policies plus the user-agent modification
     * policy.
     */
    private HttpPipeline addBlobUserAgentModificationPolicy(HttpPipeline pipeline) {
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        for (int i = 0; i < pipeline.getPolicyCount(); i++) {
            HttpPipelinePolicy currPolicy = pipeline.getPolicy(i);
            policies.add(currPolicy);
            if (currPolicy instanceof UserAgentPolicy) {
                policies.add(new BlobUserAgentModificationPolicy(CLIENT_NAME, CLIENT_VERSION));
            }
        }
        return new HttpPipelineBuilder()
            .httpClient(pipeline.getHttpClient())
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .build();
    }

    /**
     * Builds a plain (non-encrypting) {@link BlobAsyncClient} mirroring this builder's configuration. It is used
     * by {@link BlobDecryptionPolicy} for range re-requests during decryption.
     *
     * <p>NOTE(review): only {@code retryOptions} (the storage-specific options) is forwarded here;
     * {@code coreRetryOptions} set via {@link #retryOptions(RetryOptions)} is not — confirm whether the inner
     * client should receive it as well.</p>
     *
     * @return the configured {@link BlobAsyncClient}.
     */
    private BlobAsyncClient getUnencryptedBlobClient() {
        BlobClientBuilder builder = new BlobClientBuilder()
            .endpoint(endpoint)
            .containerName(containerName)
            .blobName(blobName)
            .snapshot(snapshot)
            .customerProvidedKey(customerProvidedKey == null ? null
                : new CustomerProvidedKey(customerProvidedKey.getEncryptionKey()))
            .encryptionScope(encryptionScope == null ? null : encryptionScope.getEncryptionScope())
            .versionId(versionId)
            .serviceVersion(version)
            .pipeline(this.httpPipeline)
            .httpClient(httpClient)
            .configuration(configuration)
            .retryOptions(this.retryOptions)
            .clientOptions(this.clientOptions);
        // Forward whichever single credential was configured on this builder.
        if (storageSharedKeyCredential != null) {
            builder.credential(storageSharedKeyCredential);
        } else if (tokenCredential != null) {
            builder.credential(tokenCredential);
        } else if (azureSasCredential != null) {
            builder.credential(azureSasCredential);
        } else if (sasToken != null) {
            builder.credential(new AzureSasCredential(sasToken));
        }
        for (HttpPipelinePolicy policy : perCallPolicies) {
            builder.addPolicy(policy);
        }
        for (HttpPipelinePolicy policy : perRetryPolicies) {
            builder.addPolicy(policy);
        }
        return builder.buildAsyncClient();
    }

    /**
     * Assembles the {@link HttpPipeline} for the encrypted client. If the caller supplied a pipeline, it is
     * copied with a {@link BlobDecryptionPolicy} prepended (rejecting pipelines that already contain one);
     * otherwise a full pipeline is built from the individual settings on this builder.
     *
     * @return the pipeline to use for the encrypted client.
     * @throws IllegalArgumentException If the supplied pipeline already contains a {@link BlobDecryptionPolicy}.
     */
    private HttpPipeline getHttpPipeline() {
        CredentialValidator.validateSingleCredentialIsPresent(
            storageSharedKeyCredential, tokenCredential, azureSasCredential, sasToken, LOGGER);

        // Caller-supplied pipeline: copy it, refusing pipelines already set up for decryption.
        if (httpPipeline != null) {
            List<HttpPipelinePolicy> policies = new ArrayList<>();
            // NOTE(review): this flag is never read or set — dead local, candidate for removal.
            boolean decryptionPolicyPresent = false;
            for (int i = 0; i < httpPipeline.getPolicyCount(); i++) {
                HttpPipelinePolicy currPolicy = httpPipeline.getPolicy(i);
                if (currPolicy instanceof BlobDecryptionPolicy) {
                    throw LOGGER.logExceptionAsError(new IllegalArgumentException("The passed pipeline was already"
                        + " configured for encryption/decryption in a way that might conflict with the passed key "
                        + "information. Please ensure that the passed pipeline is not already configured for "
                        + "encryption/decryption"));
                }
                policies.add(currPolicy);
            }
            // Decryption must run first so it can observe/modify range headers before other policies.
            policies.add(0, new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
                getUnencryptedBlobClient()));
            return new HttpPipelineBuilder()
                .httpClient(httpPipeline.getHttpClient())
                .policies(policies.toArray(new HttpPipelinePolicy[0]))
                .build();
        }

        // No pipeline supplied: construct the standard storage pipeline. Policy order is significant.
        Configuration userAgentConfiguration = (configuration == null) ? Configuration.NONE : configuration;
        List<HttpPipelinePolicy> policies = new ArrayList<>();
        policies.add(new BlobDecryptionPolicy(keyWrapper, keyResolver, requiresEncryption,
            getUnencryptedBlobClient()));
        // ClientOptions application ID wins over the (deprecated) HttpLogOptions one.
        String applicationId = clientOptions.getApplicationId() != null ? clientOptions.getApplicationId()
            : logOptions.getApplicationId();
        policies.add(new UserAgentPolicy(applicationId, BLOB_CLIENT_NAME, BLOB_CLIENT_VERSION,
            userAgentConfiguration));
        policies.add(new RequestIdPolicy());
        policies.addAll(perCallPolicies);
        HttpPolicyProviders.addBeforeRetryPolicies(policies);
        policies.add(BuilderUtils.createRetryPolicy(retryOptions, coreRetryOptions, LOGGER));
        policies.add(new AddDatePolicy());

        HttpHeaders headers = new HttpHeaders();
        clientOptions.getHeaders().forEach(header -> headers.put(header.getName(), header.getValue()));
        if (headers.getSize() > 0) {
            policies.add(new AddHeadersPolicy(headers));
        }
        policies.add(new MetadataValidationPolicy());

        // Authentication policy matching whichever single credential is configured (if any).
        if (storageSharedKeyCredential != null) {
            policies.add(new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential));
        } else if (tokenCredential != null) {
            // Bearer tokens must never be sent over plain HTTP.
            BuilderHelper.httpsValidation(tokenCredential, "bearer token", endpoint, LOGGER);
            policies.add(new BearerTokenAuthenticationPolicy(tokenCredential, Constants.STORAGE_SCOPE));
        } else if (azureSasCredential != null) {
            policies.add(new AzureSasCredentialPolicy(azureSasCredential, false));
        } else if (sasToken != null) {
            policies.add(new AzureSasCredentialPolicy(new AzureSasCredential(sasToken), false));
        }

        policies.addAll(perRetryPolicies);
        HttpPolicyProviders.addAfterRetryPolicies(policies);

        policies.add(new ResponseValidationPolicyBuilder()
            .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID)
            .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256)
            .build());
        policies.add(new HttpLoggingPolicy(logOptions));
        policies.add(new ScrubEtagPolicy());

        return new HttpPipelineBuilder()
            .policies(policies.toArray(new HttpPipelinePolicy[0]))
            .httpClient(httpClient)
            .build();
    }

    /**
     * Sets the encryption key parameters for the client.
     *
     * @param key An object of type {@link AsyncKeyEncryptionKey} that is used to wrap/unwrap the content
     * encryption key.
     * @param keyWrapAlgorithm The {@link String} used to wrap the key.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder key(AsyncKeyEncryptionKey key, String keyWrapAlgorithm) {
        this.keyWrapper = key;
        this.keyWrapAlgorithm = keyWrapAlgorithm;
        return this;
    }

    /**
     * Sets the encryption parameters for this client.
     *
     * @param keyResolver The key resolver used to select the correct key for decrypting existing blobs.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder keyResolver(AsyncKeyEncryptionKeyResolver keyResolver) {
        this.keyResolver = keyResolver;
        return this;
    }

    /**
     * Validates that the configured key-wrapping settings are usable: at least one of key/keyResolver is present,
     * and a wrap algorithm accompanies any key.
     *
     * @throws IllegalArgumentException If the configuration is invalid.
     */
    private void checkValidEncryptionParameters() {
        if (this.keyWrapper == null && this.keyResolver == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException("Key and KeyResolver cannot both be null"));
        }
        if (this.keyWrapper != null && this.keyWrapAlgorithm == null) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("Key Wrap Algorithm must be specified with a Key."));
        }
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link StorageSharedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public EncryptedBlobClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Credentials are mutually exclusive — clear the alternatives.
        this.tokenCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the {@link AzureNamedKeyCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureNamedKeyCredential}.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureNamedKeyCredential credential) {
        Objects.requireNonNull(credential, "'credential' cannot be null.");
        return credential(StorageSharedKeyCredential.fromAzureNamedKeyCredential(credential));
    }

    /**
     * Sets the {@link TokenCredential} used to authorize requests sent to the service. Refer to the Azure SDK for
     * Java identity and authentication documentation for more details on proper usage of the
     * {@link TokenCredential} type.
     *
     * @param credential {@link TokenCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(TokenCredential credential) {
        this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service.
     *
     * @param sasToken The SAS token to use for authenticating requests. This string should only be the query
     * parameters (with or without a leading '?') and not a full url.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public EncryptedBlobClientBuilder sasToken(String sasToken) {
        this.sasToken = Objects.requireNonNull(sasToken, "'sasToken' cannot be null.");
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        return this;
    }

    /**
     * Sets the {@link AzureSasCredential} used to authorize requests sent to the service.
     *
     * @param credential {@link AzureSasCredential} used to authorize requests sent to the service.
     * @return the updated EncryptedBlobClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder credential(AzureSasCredential credential) {
        this.azureSasCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        return this;
    }

    /**
     * Clears the credential used to authorize the request.
     *
     * <p>This is for blobs that are publicly accessible.</p>
     *
     * @return the updated EncryptedBlobClientBuilder
     */
    public EncryptedBlobClientBuilder setAnonymousAccess() {
        this.storageSharedKeyCredential = null;
        this.tokenCredential = null;
        this.azureSasCredential = null;
        this.sasToken = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated EncryptedBlobClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} is invalid.
     */
    @Override
    public EncryptedBlobClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, LOGGER);
        StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw LOGGER.logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive blob service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        // Pick up whichever auth mechanism the connection string carries (account key or SAS).
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the service endpoint, additionally parses it for information (SAS token,
container name, blob name).
     *
     * <p>If the blob name contains special characters, pass in the url encoded version of the blob name.</p>
     *
     * <p>If the endpoint is to a blob in the root container, this method will fail as it will interpret the blob
     * name as the container name. With only one path element, it is impossible to distinguish between a container
     * name and a blob in the root container, so it is assumed to be the container name as this is much more
     * common. When working with blobs in the root container, it is best to set the endpoint to the account url
     * and specify the blob name separately using {@link EncryptedBlobClientBuilder#blobName(String)}.</p>
     *
     * @param endpoint URL of the service
     * @return the updated EncryptedBlobClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL.
     */
    @Override
    public EncryptedBlobClientBuilder endpoint(String endpoint) {
        try {
            // Parse the URL so container/blob/snapshot/version information embedded in it is captured.
            URL url = new URL(endpoint);
            BlobUrlParts parts = BlobUrlParts.parse(url);
            this.accountName = parts.getAccountName();
            this.endpoint = BuilderHelper.getEndpoint(parts);
            // Only overwrite a previously-set container/blob name when the URL actually carries one.
            this.containerName = parts.getBlobContainerName() == null ? this.containerName : parts.getBlobContainerName();
            this.blobName = parts.getBlobName() == null ? this.blobName : Utility.urlEncode(parts.getBlobName());
            this.snapshot = parts.getSnapshot();
            this.versionId = parts.getVersionId();

            // A SAS token embedded in the URL doubles as the credential for this builder.
            String sasToken = parts.getCommonSasQueryParameters().encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            throw LOGGER.logExceptionAsError(
                new IllegalArgumentException("The Azure Storage Blob endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the name of the container that contains the blob.
     *
     * @param containerName Name of the container. If the value is {@code null} or empty, the root container,
     * {@code $root}, will be used.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder containerName(String containerName) {
        this.containerName = containerName;
        return this;
    }

    /**
     * Sets the name of the blob.
     *
     * @param blobName Name of the blob. If the blob name contains special characters, pass in the url encoded
     * version of the blob name.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code blobName} is {@code null}
     */
    public EncryptedBlobClientBuilder blobName(String blobName) {
        // Decode-then-encode normalizes the name whether the caller passed it encoded or not.
        this.blobName = Utility.urlEncode(Utility.urlDecode(Objects.requireNonNull(blobName,
            "'blobName' cannot be null.")));
        return this;
    }

    /**
     * Sets the snapshot identifier of the blob.
     *
     * @param snapshot Snapshot identifier for the blob.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the version identifier of the blob.
     *
     * @param versionId Version identifier for the blob, pass {@code null} to interact with the latest blob
     * version.
     * @return the updated EncryptedBlobClientBuilder object
     */
    public EncryptedBlobClientBuilder versionId(String versionId) {
        this.versionId = versionId;
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
     *
     * <p><strong>Note:</strong> if an {@link HttpPipeline} is set on this builder it takes precedence and this
     * setting is ignored.</p>
     *
     * @param httpClient The {@link HttpClient} to use for requests.
     * @return the updated EncryptedBlobClientBuilder object
     */
    @Override
    public EncryptedBlobClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            LOGGER.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a {@link HttpPipelinePolicy pipeline policy} to apply on each request sent.
     *
     * <p><strong>Note:</strong> if an {@link HttpPipeline} is set on this builder it takes precedence and this
     * setting is ignored.</p>
     *
     * @param pipelinePolicy A {@link HttpPipelinePolicy pipeline policy}.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null");
        // Position determines whether the policy runs once per logical call or on every retry attempt.
        if (pipelinePolicy.getPipelinePosition() == HttpPipelinePosition.PER_CALL) {
            perCallPolicies.add(pipelinePolicy);
        } else {
            perRetryPolicies.add(pipelinePolicy);
        }
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions logging configuration} to use when sending and receiving requests to and
     * from the service. If a {@code logLevel} is not provided, the default of {@link HttpLogDetailLevel} is used.
     *
     * <p><strong>Note:</strong> if an {@link HttpPipeline} is set on this builder it takes precedence and this
     * setting is ignored.</p>
     *
     * @param logOptions The {@link HttpLogOptions logging configuration} to use when sending and receiving
     * requests to and from the service.
     * @return the updated EncryptedBlobClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    @Override
    public EncryptedBlobClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default Storage allowlist log headers and query parameters.
     *
     * @return the default http log options.
*/ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * Setting this is mutually exclusive with using {@link * * @param retryOptions {@link RequestRetryOptions}. * @return the updated EncryptedBlobClientBuilder object. */ public EncryptedBlobClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = retryOptions; return this; } /** * Sets the {@link RetryOptions} for all the requests made through the client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * Setting this is mutually exclusive with using {@link * Consider using {@link * * @param retryOptions The {@link RetryOptions} to use for all the requests made through the client. 
* @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder retryOptions(RetryOptions retryOptions) { this.coreRetryOptions = retryOptions; return this; } /** * Sets the {@link HttpPipeline} to use for the service client. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * <p> * The {@link * {@link * not ignored when {@code pipeline} is set. * * @return the updated EncryptedBlobClientBuilder object */ @Override public EncryptedBlobClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { LOGGER.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Allows for setting common properties such as application ID, headers, proxy configuration, etc. Note that it is * recommended that this method be called with an instance of the {@link HttpClientOptions} * class (a subclass of the {@link ClientOptions} base class). The HttpClientOptions subclass provides more * configuration options suitable for HTTP clients, which is applicable for any class that implements this HttpTrait * interface. * * <p><strong>Note:</strong> It is important to understand the precedence order of the HttpTrait APIs. 
In * particular, if a {@link HttpPipeline} is specified, this takes precedence over all other APIs in the trait, and * they will be ignored. If no {@link HttpPipeline} is specified, a HTTP pipeline will be constructed internally * based on the settings provided to this trait. Additionally, there may be other APIs in types that implement this * trait that are also ignored if an {@link HttpPipeline} is specified, so please be sure to refer to the * documentation of types that implement this trait to understand the full set of implications.</p> * * @param clientOptions A configured instance of {@link HttpClientOptions}. * @see HttpClientOptions * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code clientOptions} is {@code null}. */ @Override public EncryptedBlobClientBuilder clientOptions(ClientOptions clientOptions) { this.clientOptions = Objects.requireNonNull(clientOptions, "'clientOptions' cannot be null."); return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version of the client library will have the result of potentially moving to a newer service version. * <p> * Targeting a specific service version may also mean that the service will return an error for newer APIs. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. 
* * @param customerProvidedKey {@link CustomerProvidedKey} * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@code encryption scope} that is used to encrypt blob contents on the server. * * @param encryptionScope Encryption scope containing the encryption key information. * @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder encryptionScope(String encryptionScope) { if (encryptionScope == null) { this.encryptionScope = null; } else { this.encryptionScope = new EncryptionScope().setEncryptionScope(encryptionScope); } return this; } /** * Configures the builder based on the passed {@link BlobClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobClient BlobClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. 
*/ public EncryptedBlobClientBuilder blobClient(BlobClient blobClient) { Objects.requireNonNull(blobClient); return client(blobClient.getHttpPipeline(), blobClient.getBlobUrl(), blobClient.getServiceVersion()); } /** * Configures the builder based on the passed {@link BlobAsyncClient}. This will set the {@link HttpPipeline}, * {@link URL} and {@link BlobServiceVersion} that are used to interact with the service. Note that the underlying * pipeline should not already be configured for encryption/decryption. * * <p>If {@code pipeline} is set, all other settings are ignored, aside from {@link * {@link * * <p>Note that for security reasons, this method does not copy over the {@link CustomerProvidedKey} and * encryption scope properties from the provided client. To set CPK, please use * {@link * * @param blobAsyncClient BlobAsyncClient used to configure the builder. * @return the updated EncryptedBlobClientBuilder object * @throws NullPointerException If {@code containerClient} is {@code null}. */ public EncryptedBlobClientBuilder blobAsyncClient(BlobAsyncClient blobAsyncClient) { Objects.requireNonNull(blobAsyncClient); return client(blobAsyncClient.getHttpPipeline(), blobAsyncClient.getBlobUrl(), blobAsyncClient.getServiceVersion()); } /** * Helper method to transform a regular client into an encrypted client * * @param httpPipeline {@link HttpPipeline} * @param endpoint The endpoint. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated EncryptedBlobClientBuilder object */ private EncryptedBlobClientBuilder client(HttpPipeline httpPipeline, String endpoint, BlobServiceVersion version) { this.endpoint(endpoint); this.serviceVersion(version); return this.pipeline(httpPipeline); } /** * Sets the requires encryption option. * * @param requiresEncryption Whether encryption is enforced by this client. Client will throw if data is * downloaded and it is not encrypted. 
* @return the updated EncryptedBlobClientBuilder object */ public EncryptedBlobClientBuilder requiresEncryption(boolean requiresEncryption) { this.requiresEncryption = requiresEncryption; return this; } }
If it's a multi-request process, then having the orchestrator prefetch the EncryptionData and set it into the Context is a good idea.
/*
 * Pipeline policy hook: intercepts blob download responses and, when the blob carries client-side
 * encryption metadata, replaces the network body with a decrypting Flux so callers receive plaintext.
 *
 * Two paths:
 *  - No Range header on the request: the whole blob was requested, so no range expansion was needed.
 *    Encryption metadata (if any) is read off the response header and decryption wraps the body.
 *  - Range header present: a getProperties call is issued first (forwarding the caller's access
 *    conditions) to fetch the encryption metadata, the outgoing Range header is rewritten to the
 *    expanded cipher-block-aligned range, and the response body is then decrypted.
 *
 * @param context the request context whose headers may be mutated (Range rewrite).
 * @param next the next policy in the pipeline.
 * @return the (possibly decrypted) response.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    HttpHeaders requestHeaders = context.getHttpRequest().getHeaders();
    String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER);
    if (initialRangeHeader == null) {
        return next.process().flatMap(httpResponse -> {
            // Only GET responses with a body can be downloads that need decryption.
            if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                && httpResponse.getBody() != null) {
                HttpHeaders responseHeaders = httpResponse.getHeaders();
                /*
                 * Deserialize encryption data.
                 * If there is no encryption data set on the blob, then we can return the request as is since we
                 * didn't expand the range at all.
                 */
                EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData(
                    httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
                        + ENCRYPTION_DATA_KEY), requiresEncryption);
                if (encryptionData == null) {
                    return Mono.just(httpResponse);
                }
                /*
                 * We will need to know the total size of the data to know when to finalize the decryption. If it
                 * was not set originally with the intent of downloading the whole blob, update it here.
                 * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have
                 * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob.
                 */
                EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData);
                encryptedRange.setAdjustedDownloadCount(
                    Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                /*
                 * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption
                 * block size. Padding is only ever present in track 1.
                 */
                boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1)
                    && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount()
                        > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange,
                    padding, encryptionData, httpResponse.getRequest().getUrl().toString());
                return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData));
            } else {
                return Mono.just(httpResponse);
            }
        });
    } else {
        // Ranged download: the encryption metadata must be known BEFORE the request goes out so the
        // range can be expanded to cipher-block boundaries; prefetch it via getProperties, forwarding
        // the caller's original access conditions.
        BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders);
        return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> {
            EncryptionData data = EncryptionData.getAndValidateEncryptionData(
                response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY),
                requiresEncryption);
            // Unencrypted blob: forward the request untouched.
            return data == null ? next.process() : Mono.just(data)
                .flatMap(encryptionData -> {
                    EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(
                        initialRangeHeader, encryptionData);
                    // Rewrite the outgoing Range header to the expanded, block-aligned range.
                    // NOTE(review): 'encryptionData != null' is always true here (this branch only
                    // runs when 'data' above was non-null) — the extra check is redundant.
                    if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null
                        && encryptionData != null) {
                        requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString());
                    }
                    return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange));
                })
                // tuple2: T1 = EncryptionData, T2 = EncryptedBlobRange.
                .flatMap(tuple2 -> next.process().flatMap(httpResponse -> {
                    if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                        && httpResponse.getBody() != null) {
                        HttpHeaders responseHeaders = httpResponse.getHeaders();
                        // If the download response itself carries no encryption metadata, pass it
                        // through undecrypted — presumably this guards against the blob changing
                        // between the getProperties call and the download; TODO confirm.
                        if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
                            + ENCRYPTION_DATA_KEY) == null) {
                            return Mono.just(httpResponse);
                        }
                        tuple2.getT2().setAdjustedDownloadCount(
                            Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                        /*
                         * We expect padding only if we are at the end of a blob and it is not a multiple of the
                         * encryption block size. Padding is only ever present in track 1.
                         */
                        boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol()
                            .equals(ENCRYPTION_PROTOCOL_V1)
                            && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount()
                                > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                        Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(),
                            padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString());
                        return Mono.just(new DecryptedResponse(httpResponse, plainTextData));
                    } else {
                        return Mono.just(httpResponse);
                    }
                }));
        });
    }
}
/*
 * Pipeline policy hook: intercepts blob download responses and, when the blob carries client-side
 * encryption metadata, replaces the network body with a decrypting Flux so callers receive plaintext.
 *
 * Two paths:
 *  - No Range header on the request: the whole blob was requested, so no range expansion was needed.
 *    Encryption metadata (if any) is read off the response header and decryption wraps the body.
 *  - Range header present: a getProperties call is issued first (forwarding the caller's access
 *    conditions) to fetch the encryption metadata, the outgoing Range header is rewritten to the
 *    expanded cipher-block-aligned range, and the response body is then decrypted.
 *
 * @param context the request context whose headers may be mutated (Range rewrite, ETag).
 * @param next the next policy in the pipeline.
 * @return the (possibly decrypted) response.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    HttpHeaders requestHeaders = context.getHttpRequest().getHeaders();
    String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER);
    if (initialRangeHeader == null) {
        return next.process().flatMap(httpResponse -> {
            // Only GET responses with a body can be downloads that need decryption.
            if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                && httpResponse.getBody() != null) {
                HttpHeaders responseHeaders = httpResponse.getHeaders();
                /*
                 * Deserialize encryption data.
                 * If there is no encryption data set on the blob, then we can return the request as is since we
                 * didn't expand the range at all.
                 */
                EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData(
                    httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
                        + ENCRYPTION_DATA_KEY), requiresEncryption);
                if (encryptionData == null) {
                    return Mono.just(httpResponse);
                }
                /*
                 * We will need to know the total size of the data to know when to finalize the decryption. If it
                 * was not set originally with the intent of downloading the whole blob, update it here.
                 * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have
                 * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob.
                 */
                EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData);
                encryptedRange.setAdjustedDownloadCount(
                    Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                /*
                 * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption
                 * block size. Padding is only ever present in track 1.
                 */
                boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1)
                    && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount()
                        > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange,
                    padding, encryptionData, httpResponse.getRequest().getUrl().toString());
                return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData));
            } else {
                return Mono.just(httpResponse);
            }
        });
    } else {
        // Ranged download: the encryption metadata must be known BEFORE the request goes out so the
        // range can be expanded to cipher-block boundaries; prefetch it via getProperties, forwarding
        // the caller's original access conditions.
        BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders);
        return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> {
            EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData(
                response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY),
                requiresEncryption);
            // NOTE(review): "ETag" is a response header; presumably the intent is to condition the
            // download on the etag observed by getProperties (i.e. an "If-Match" header) so the blob
            // cannot change between the two calls — confirm the header name.
            String etag = response.getValue().getETag();
            requestHeaders.set("ETag", etag);
            // Unencrypted blob: forward the request untouched.
            if (encryptionData == null) {
                return next.process();
            }
            EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(
                initialRangeHeader, encryptionData);
            // Rewrite the outgoing Range header to the expanded, block-aligned range.
            if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) {
                requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString());
            }
            return next.process().map(httpResponse -> {
                // Only GET responses with a body can be downloads that need decryption.
                if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                    && httpResponse.getBody() != null) {
                    HttpHeaders responseHeaders = httpResponse.getHeaders();
                    // No encryption metadata on the actual download response: pass it through
                    // undecrypted.
                    if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) {
                        return httpResponse;
                    }
                    encryptedRange.setAdjustedDownloadCount(
                        Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                    /*
                     * We expect padding only if we are at the end of a blob and it is not a multiple of the
                     * encryption block size. Padding is only ever present in track 1.
                     */
                    boolean padding = encryptionData.getEncryptionAgent().getProtocol()
                        .equals(ENCRYPTION_PROTOCOL_V1)
                        && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount()
                            > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                    Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange,
                        padding, encryptionData, httpResponse.getRequest().getUrl().toString());
                    return new DecryptedResponse(httpResponse, plainTextData);
                } else {
                    return httpResponse;
                }
            });
        });
    }
}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
For GA: we should introduce a `BlobRequestConditions(HttpHeaders)` constructor.
/*
 * Rebuilds the caller's access conditions from the raw request headers as a BlobRequestConditions
 * object so they can be forwarded to the internal getProperties call that prefetches the blob's
 * encryption metadata.
 *
 * @param requestHeaders Headers of the outgoing download request.
 * @return a BlobRequestConditions carrying the lease id and the standard HTTP conditional headers;
 *         any header that is absent maps to a null condition.
 */
private BlobRequestConditions extractRequestConditionsFromRequest(HttpHeaders requestHeaders) {
    return new BlobRequestConditions()
        .setLeaseId(requestHeaders.getValue("x-ms-lease-id"))
        // Date headers arrive in RFC 1123 form; parse only when the header is present.
        .setIfUnmodifiedSince(requestHeaders.getValue("If-Unmodified-Since") == null
            ? null
            : new DateTimeRfc1123(requestHeaders.getValue("If-Unmodified-Since")).getDateTime())
        .setIfNoneMatch(requestHeaders.getValue("If-None-Match"))
        .setIfMatch(requestHeaders.getValue("If-Match"))
        .setIfModifiedSince(requestHeaders.getValue("If-Modified-Since") == null
            ? null
            : new DateTimeRfc1123(requestHeaders.getValue("If-Modified-Since")).getDateTime());
}
return new BlobRequestConditions()
/*
 * Rebuilds the caller's access conditions from the raw request headers as a BlobRequestConditions
 * object so they can be forwarded to the internal getProperties call that prefetches the blob's
 * encryption metadata.
 *
 * @param requestHeaders Headers of the outgoing download request.
 * @return a BlobRequestConditions carrying the lease id and the standard HTTP conditional headers;
 *         any header that is absent maps to a null condition.
 */
private BlobRequestConditions extractRequestConditionsFromRequest(HttpHeaders requestHeaders) {
    return new BlobRequestConditions()
        .setLeaseId(requestHeaders.getValue("x-ms-lease-id"))
        // Date headers arrive in RFC 1123 form; parse only when the header is present.
        .setIfUnmodifiedSince(requestHeaders.getValue("If-Unmodified-Since") == null
            ? null
            : new DateTimeRfc1123(requestHeaders.getValue("If-Unmodified-Since")).getDateTime())
        .setIfNoneMatch(requestHeaders.getValue("If-None-Match"))
        .setIfMatch(requestHeaders.getValue("If-Match"))
        .setIfModifiedSince(requestHeaders.getValue("If-Modified-Since") == null
            ? null
            : new DateTimeRfc1123(requestHeaders.getValue("If-Modified-Since")).getDateTime());
}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
I believe similar logic already exists in download, where a tuple is passed with one of its elements being the prefetched getProperties data.
/*
 * Pipeline policy hook: intercepts blob download responses and, when the blob carries client-side
 * encryption metadata, replaces the network body with a decrypting Flux so callers receive plaintext.
 *
 * Two paths:
 *  - No Range header on the request: the whole blob was requested, so no range expansion was needed.
 *    Encryption metadata (if any) is read off the response header and decryption wraps the body.
 *  - Range header present: a getProperties call is issued first (forwarding the caller's access
 *    conditions) to fetch the encryption metadata, the outgoing Range header is rewritten to the
 *    expanded cipher-block-aligned range, and the response body is then decrypted.
 *
 * @param context the request context whose headers may be mutated (Range rewrite).
 * @param next the next policy in the pipeline.
 * @return the (possibly decrypted) response.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    HttpHeaders requestHeaders = context.getHttpRequest().getHeaders();
    String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER);
    if (initialRangeHeader == null) {
        return next.process().flatMap(httpResponse -> {
            // Only GET responses with a body can be downloads that need decryption.
            if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                && httpResponse.getBody() != null) {
                HttpHeaders responseHeaders = httpResponse.getHeaders();
                /*
                 * Deserialize encryption data.
                 * If there is no encryption data set on the blob, then we can return the request as is since we
                 * didn't expand the range at all.
                 */
                EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData(
                    httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
                        + ENCRYPTION_DATA_KEY), requiresEncryption);
                if (encryptionData == null) {
                    return Mono.just(httpResponse);
                }
                /*
                 * We will need to know the total size of the data to know when to finalize the decryption. If it
                 * was not set originally with the intent of downloading the whole blob, update it here.
                 * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have
                 * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob.
                 */
                EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData);
                encryptedRange.setAdjustedDownloadCount(
                    Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                /*
                 * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption
                 * block size. Padding is only ever present in track 1.
                 */
                boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1)
                    && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount()
                        > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange,
                    padding, encryptionData, httpResponse.getRequest().getUrl().toString());
                return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData));
            } else {
                return Mono.just(httpResponse);
            }
        });
    } else {
        // Ranged download: the encryption metadata must be known BEFORE the request goes out so the
        // range can be expanded to cipher-block boundaries; prefetch it via getProperties, forwarding
        // the caller's original access conditions.
        BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders);
        return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> {
            EncryptionData data = EncryptionData.getAndValidateEncryptionData(
                response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY),
                requiresEncryption);
            // Unencrypted blob: forward the request untouched.
            return data == null ? next.process() : Mono.just(data)
                .flatMap(encryptionData -> {
                    EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(
                        initialRangeHeader, encryptionData);
                    // Rewrite the outgoing Range header to the expanded, block-aligned range.
                    // NOTE(review): 'encryptionData != null' is always true here (this branch only
                    // runs when 'data' above was non-null) — the extra check is redundant.
                    if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null
                        && encryptionData != null) {
                        requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString());
                    }
                    return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange));
                })
                // tuple2: T1 = EncryptionData, T2 = EncryptedBlobRange.
                .flatMap(tuple2 -> next.process().flatMap(httpResponse -> {
                    if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                        && httpResponse.getBody() != null) {
                        HttpHeaders responseHeaders = httpResponse.getHeaders();
                        // If the download response itself carries no encryption metadata, pass it
                        // through undecrypted — presumably this guards against the blob changing
                        // between the getProperties call and the download; TODO confirm.
                        if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
                            + ENCRYPTION_DATA_KEY) == null) {
                            return Mono.just(httpResponse);
                        }
                        tuple2.getT2().setAdjustedDownloadCount(
                            Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                        /*
                         * We expect padding only if we are at the end of a blob and it is not a multiple of the
                         * encryption block size. Padding is only ever present in track 1.
                         */
                        boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol()
                            .equals(ENCRYPTION_PROTOCOL_V1)
                            && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount()
                                > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                        Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(),
                            padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString());
                        return Mono.just(new DecryptedResponse(httpResponse, plainTextData));
                    } else {
                        return Mono.just(httpResponse);
                    }
                }));
        });
    }
}
/*
 * Pipeline policy hook: intercepts blob download responses and, when the blob carries client-side
 * encryption metadata, replaces the network body with a decrypting Flux so callers receive plaintext.
 *
 * Two paths:
 *  - No Range header on the request: the whole blob was requested, so no range expansion was needed.
 *    Encryption metadata (if any) is read off the response header and decryption wraps the body.
 *  - Range header present: a getProperties call is issued first (forwarding the caller's access
 *    conditions) to fetch the encryption metadata, the outgoing Range header is rewritten to the
 *    expanded cipher-block-aligned range, and the response body is then decrypted.
 *
 * @param context the request context whose headers may be mutated (Range rewrite, ETag).
 * @param next the next policy in the pipeline.
 * @return the (possibly decrypted) response.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    HttpHeaders requestHeaders = context.getHttpRequest().getHeaders();
    String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER);
    if (initialRangeHeader == null) {
        return next.process().flatMap(httpResponse -> {
            // Only GET responses with a body can be downloads that need decryption.
            if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                && httpResponse.getBody() != null) {
                HttpHeaders responseHeaders = httpResponse.getHeaders();
                /*
                 * Deserialize encryption data.
                 * If there is no encryption data set on the blob, then we can return the request as is since we
                 * didn't expand the range at all.
                 */
                EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData(
                    httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
                        + ENCRYPTION_DATA_KEY), requiresEncryption);
                if (encryptionData == null) {
                    return Mono.just(httpResponse);
                }
                /*
                 * We will need to know the total size of the data to know when to finalize the decryption. If it
                 * was not set originally with the intent of downloading the whole blob, update it here.
                 * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have
                 * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob.
                 */
                EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData);
                encryptedRange.setAdjustedDownloadCount(
                    Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                /*
                 * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption
                 * block size. Padding is only ever present in track 1.
                 */
                boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1)
                    && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount()
                        > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange,
                    padding, encryptionData, httpResponse.getRequest().getUrl().toString());
                return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData));
            } else {
                return Mono.just(httpResponse);
            }
        });
    } else {
        // Ranged download: the encryption metadata must be known BEFORE the request goes out so the
        // range can be expanded to cipher-block boundaries; prefetch it via getProperties, forwarding
        // the caller's original access conditions.
        BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders);
        return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> {
            EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData(
                response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY),
                requiresEncryption);
            // NOTE(review): "ETag" is a response header; presumably the intent is to condition the
            // download on the etag observed by getProperties (i.e. an "If-Match" header) so the blob
            // cannot change between the two calls — confirm the header name.
            String etag = response.getValue().getETag();
            requestHeaders.set("ETag", etag);
            // Unencrypted blob: forward the request untouched.
            if (encryptionData == null) {
                return next.process();
            }
            EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader(
                initialRangeHeader, encryptionData);
            // Rewrite the outgoing Range header to the expanded, block-aligned range.
            if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) {
                requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString());
            }
            return next.process().map(httpResponse -> {
                // Only GET responses with a body can be downloads that need decryption.
                if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET
                    && httpResponse.getBody() != null) {
                    HttpHeaders responseHeaders = httpResponse.getHeaders();
                    // No encryption metadata on the actual download response: pass it through
                    // undecrypted.
                    if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) {
                        return httpResponse;
                    }
                    encryptedRange.setAdjustedDownloadCount(
                        Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH)));
                    /*
                     * We expect padding only if we are at the end of a blob and it is not a multiple of the
                     * encryption block size. Padding is only ever present in track 1.
                     */
                    boolean padding = encryptionData.getEncryptionAgent().getProtocol()
                        .equals(ENCRYPTION_PROTOCOL_V1)
                        && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount()
                            > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE));
                    Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange,
                        padding, encryptionData, httpResponse.getRequest().getUrl().toString());
                    return new DecryptedResponse(httpResponse, plainTextData);
                } else {
                    return httpResponse;
                }
            });
        });
    }
}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
minor nit: for readability we should turn `Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY` into a constant `EncryptionConstants.ENCRYPTION_META_DATA_KEY` (doesn't have to be that class) or make it into a utility API somewhere. This will help centralize all uses of it and help prevent a case where we may accidentally do `Constants.HeaderConstants.X_MS_META + ENCRYPTION_DATA_KEY` and drop the needed `-`
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-"
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
Does this end up skipping the rest of the `HttpPipeline` when data isn't null?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
return data == null ? next.process() : Mono.just(data)
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
minor nit: may I suggest making this into an if-return block for data == null ```java if (data == null) { return next.process(); } return //decryption logic ``` as the else in the ternary operator is many lines
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
return data == null ? next.process() : Mono.just(data)
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
To me, it looks like most these calls are operating synchronously, do you think it'd be easier to read and debug if calling into async code is held to the end? For example creating the `Tuple<EncryptionData, EncryptedBlobRange>` is just pulling values from headers, could that first flatMap be removed?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
}
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
Added to list
private BlobRequestConditions extractRequestConditionsFromRequest(HttpHeaders requestHeaders) { return new BlobRequestConditions() .setLeaseId(requestHeaders.getValue("x-ms-lease-id")) .setIfUnmodifiedSince(requestHeaders.getValue("If-Unmodified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Unmodified-Since")).getDateTime()) .setIfNoneMatch(requestHeaders.getValue("If-None-Match")) .setIfMatch(requestHeaders.getValue("If-Match")) .setIfModifiedSince(requestHeaders.getValue("If-Modified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Modified-Since")).getDateTime()); }
return new BlobRequestConditions()
private BlobRequestConditions extractRequestConditionsFromRequest(HttpHeaders requestHeaders) { return new BlobRequestConditions() .setLeaseId(requestHeaders.getValue("x-ms-lease-id")) .setIfUnmodifiedSince(requestHeaders.getValue("If-Unmodified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Unmodified-Since")).getDateTime()) .setIfNoneMatch(requestHeaders.getValue("If-None-Match")) .setIfMatch(requestHeaders.getValue("If-Match")) .setIfModifiedSince(requestHeaders.getValue("If-Modified-Since") == null ? null : new DateTimeRfc1123(requestHeaders.getValue("If-Modified-Since")).getDateTime()); }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
Added to list
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData data = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); return data == null ? next.process() : Mono.just(data) .flatMap(encryptionData -> { EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null && encryptionData != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return Mono.zip(Mono.just(encryptionData), Mono.just(encryptedRange)); }) .flatMap(tuple2 -> next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY) == null) { return Mono.just(httpResponse); } tuple2.getT2().setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = tuple2.getT1().getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (tuple2.getT2().toBlobRange().getOffset() + tuple2.getT2().toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), tuple2.getT2(), padding, tuple2.getT1(), httpResponse.getRequest().getUrl().toString()); return Mono.just(new DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } })); }); } }
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders requestHeaders = context.getHttpRequest().getHeaders(); String initialRangeHeader = requestHeaders.getValue(RANGE_HEADER); if (initialRangeHeader == null) { return next.process().flatMap(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); /* * Deserialize encryption data. * If there is no encryption data set on the blob, then we can return the request as is since we * didn't expand the range at all. */ EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( httpResponse.getHeaderValue(Constants.HeaderConstants.X_MS_META + "-" + ENCRYPTION_DATA_KEY), requiresEncryption); if (encryptionData == null) { return Mono.just(httpResponse); } /* * We will need to know the total size of the data to know when to finalize the decryption. If it * was not set originally with the intent of downloading the whole blob, update it here. * If there was no range set on the request, we skipped instantiating a BlobRange as we did not have * encryption data at the time. Instantiate now with a BlobRange that indicates a full blob. */ EncryptedBlobRange encryptedRange = new EncryptedBlobRange(new BlobRange(0), encryptionData); encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the encryption * block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol().equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return Mono.just(new BlobDecryptionPolicy.DecryptedResponse(httpResponse, plainTextData)); } else { return Mono.just(httpResponse); } }); } else { BlobRequestConditions rc = extractRequestConditionsFromRequest(requestHeaders); return this.blobClient.getPropertiesWithResponse(rc).flatMap(response -> { EncryptionData encryptionData = EncryptionData.getAndValidateEncryptionData( response.getValue().getMetadata().get(CryptographyConstants.ENCRYPTION_DATA_KEY), requiresEncryption); String etag = response.getValue().getETag(); requestHeaders.set("ETag", etag); if (encryptionData == null) { return next.process(); } EncryptedBlobRange encryptedRange = EncryptedBlobRange.getEncryptedBlobRangeFromHeader( initialRangeHeader, encryptionData); if (context.getHttpRequest().getHeaders().getValue(RANGE_HEADER) != null) { requestHeaders.set(RANGE_HEADER, encryptedRange.toBlobRange().toString()); } return next.process().map(httpResponse -> { if (httpResponse.getRequest().getHttpMethod() == HttpMethod.GET && httpResponse.getBody() != null) { HttpHeaders responseHeaders = httpResponse.getHeaders(); if (httpResponse.getHeaderValue(ENCRYPTION_METADATA_HEADER) == null) { return httpResponse; } encryptedRange.setAdjustedDownloadCount( Long.parseLong(responseHeaders.getValue(CONTENT_LENGTH))); /* * We expect padding only if we are at the end of a blob and it is not a multiple of the * encryption block size. Padding is only ever present in track 1. 
*/ boolean padding = encryptionData.getEncryptionAgent().getProtocol() .equals(ENCRYPTION_PROTOCOL_V1) && (encryptedRange.toBlobRange().getOffset() + encryptedRange.toBlobRange().getCount() > (blobSize(responseHeaders) - ENCRYPTION_BLOCK_SIZE)); Flux<ByteBuffer> plainTextData = this.decryptBlob(httpResponse.getBody(), encryptedRange, padding, encryptionData, httpResponse.getRequest().getUrl().toString()); return new DecryptedResponse(httpResponse, plainTextData); } else { return httpResponse; } }); }); } }
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
class with the specified key and resolver. * <p> * If the generated policy is intended to be used for encryption, users are expected to provide a key at the * minimum. The absence of key will cause an exception to be thrown during encryption. If the generated policy is * intended to be used for decryption, users can provide a keyResolver. The client library will - 1. Invoke the key * resolver if specified to get the key. 2. If resolver is not specified but a key is specified, match the key id on * the key and use it. * * @param key An object of type {@link AsyncKeyEncryptionKey}
it's not clear to me why checking `!suppressed` ? in the scenario: `CLIENT` -> `CLIENT` -> `CLIENT`, would this lead to the second `CLIENT` span getting suppressed, but then the third `CLIENT` not getting suppressed?
private static boolean shouldSuppress(SpanKind kind, Context context) { if (isClientCall(kind)) { boolean suppress = getBoolean(CLIENT_METHOD_CALL_FLAG, context); boolean suppressed = getBoolean(SUPPRESSED_SPAN_FLAG, context); return suppress && !suppressed; } return false; }
return suppress && !suppressed;
private static boolean shouldSuppress(SpanKind kind, Context context) { return isClientCall(kind) && getBoolean(CLIENT_METHOD_CALL_FLAG, context); }
class object */ @SuppressWarnings("unchecked") private static <T> T getOrNull(Context context, String key, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { LOGGER.verbose("Could not extract key '{}' of type '{}' from context.", key, clazz); return null; }); return (T) result; }
class object */ @SuppressWarnings("unchecked") private static <T> T getOrNull(Context context, String key, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { LOGGER.verbose("Could not extract key '{}' of type '{}' from context.", key, clazz); return null; }); return (T) result; }
```suggestion outer = openTelemetryTracer.start("outer", outer, ProcessKind.SEND); ```
public void suppressNestedInterleavedClientSpan() { Context outer = openTelemetryTracer.getSharedSpanBuilder("outer", Context.NONE); openTelemetryTracer.addLink(outer.addData(SPAN_CONTEXT_KEY, TEST_CONTEXT)); outer = openTelemetryTracer.start("innerSuppressed", outer, ProcessKind.SEND); Context inner1Suppressed = openTelemetryTracer.start("innerSuppressed", outer); Context inner1NotSuppressed = openTelemetryTracer.start("innerNotSuppressed", new StartSpanOptions(com.azure.core.util.tracing.SpanKind.PRODUCER), inner1Suppressed); Context inner2Suppressed = openTelemetryTracer.start("innerSuppressed", inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner2Suppressed); assertEquals(0, testExporter.getSpans().size()); openTelemetryTracer.end("ok", null, inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner1Suppressed); openTelemetryTracer.end("ok", null, outer); assertEquals(2, testExporter.getSpans().size()); SpanData innerNotSuppressedSpan = testExporter.getSpans().get(0); SpanData outerSpan = testExporter.getSpans().get(1); assertEquals(innerNotSuppressedSpan.getSpanContext().getTraceId(), outerSpan.getSpanContext().getTraceId()); assertEquals(innerNotSuppressedSpan.getParentSpanId(), outerSpan.getSpanContext().getSpanId()); }
outer = openTelemetryTracer.start("innerSuppressed", outer, ProcessKind.SEND);
public void suppressNestedInterleavedClientSpan() { Context outer = openTelemetryTracer.getSharedSpanBuilder("outer", Context.NONE); openTelemetryTracer.addLink(outer.addData(SPAN_CONTEXT_KEY, TEST_CONTEXT)); outer = openTelemetryTracer.start("outer", outer, ProcessKind.SEND); Context inner1Suppressed = openTelemetryTracer.start("innerSuppressed", outer); Context inner1NotSuppressed = openTelemetryTracer.start("innerNotSuppressed", new StartSpanOptions(com.azure.core.util.tracing.SpanKind.PRODUCER), inner1Suppressed); Context inner2Suppressed = openTelemetryTracer.start("innerSuppressed", inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner2Suppressed); assertEquals(0, testExporter.getFinishedSpanItems().size()); openTelemetryTracer.end("ok", null, inner1NotSuppressed); openTelemetryTracer.end("ok", null, inner1Suppressed); openTelemetryTracer.end("ok", null, outer); assertEquals(2, testExporter.getFinishedSpanItems().size()); SpanData innerNotSuppressedSpan = testExporter.getFinishedSpanItems().get(0); SpanData outerSpan = testExporter.getFinishedSpanItems().get(1); assertEquals(innerNotSuppressedSpan.getSpanContext().getTraceId(), outerSpan.getSpanContext().getTraceId()); assertEquals(innerNotSuppressedSpan.getParentSpanId(), outerSpan.getSpanContext().getSpanId()); }
class TestScope implements Scope { private boolean closed = false; @Override public void close() { closed = true; } public boolean isClosed() { return this.closed; } }
class TestScope implements Scope { private boolean closed = false; @Override public void close() { closed = true; } public boolean isClosed() { return this.closed; } }
should we just create static logger in restproxyutil?
public Object invoke(Object proxy, final Method method, Object[] args) { RestProxyUtil.validateResumeOperationIsNotPresent(method, LOGGER); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (options != null) { options.getRequestCallback().accept(request); } if (request.getBody() != null) { request.setBody(validateLength(request)); } final Mono<HttpResponse> asyncResponse = send(request, context); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } }
RestProxyUtil.validateResumeOperationIsNotPresent(method, LOGGER);
public Object invoke(Object proxy, final Method method, Object[] args) { RestProxyUtils.validateResumeOperationIsNotPresent(method); try { final SwaggerMethodParser methodParser = getMethodParser(method); final HttpRequest request = createHttpRequest(methodParser, args); Context context = methodParser.setContext(args); RequestOptions options = methodParser.setRequestOptions(args); context = RestProxyUtils.mergeRequestOptionsContext(context, options); context = context.addData("caller-method", methodParser.getFullyQualifiedMethodName()) .addData("azure-eagerly-read-response", shouldEagerlyReadResponse(methodParser.getReturnType())); context = startTracingSpan(method, context); if (options != null) { options.getRequestCallback().accept(request); } Context finalContext = context; final Mono<HttpResponse> asyncResponse = RestProxyUtils.validateLengthAsync(request) .flatMap(r -> send(r, finalContext)); Mono<HttpDecodedResponse> asyncDecodedResponse = this.decoder.decode(asyncResponse, methodParser); return handleRestReturnType(asyncDecodedResponse, methodParser, methodParser.getReturnType(), context, options); } catch (IOException e) { throw LOGGER.logExceptionAsError(Exceptions.propagate(e)); } }
// Reactive InvocationHandler backing RestProxy.create(...): converts annotated Swagger-interface
// calls into HttpPipeline requests and decodes responses into the declared return types.
// NOTE(review): in this snapshot the "@Override" marker below is immediately followed by a static
// method — the invoke(...) body appears to have been elided from this view (see the standalone
// invoke above); confirm against the original file. One string literal (path.contains(":) is also
// visibly truncated by extraction. Formatting is collapsed; code is kept byte-identical.
class RestProxy implements InvocationHandler { private static final ByteBuffer VALIDATION_BUFFER = ByteBuffer.allocate(0); private static final String BODY_TOO_LARGE = "Request body emitted %d bytes, more than the expected %d bytes."; private static final String BODY_TOO_SMALL = "Request body emitted %d bytes, less than the expected %d bytes."; private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". 
 * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. * * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override static Flux<ByteBuffer> validateLength(final HttpRequest request) { final Flux<ByteBuffer> bbFlux = request.getBody(); if (bbFlux == null) { return Flux.empty(); } final long expectedLength = Long.parseLong(request.getHeaders().getValue("Content-Length")); return Flux.defer(() -> { final long[] currentTotalLength = new long[1]; return Flux.concat(bbFlux, Flux.just(VALIDATION_BUFFER)).handle((buffer, sink) -> { if (buffer == null) { return; } if (buffer == VALIDATION_BUFFER) { if (expectedLength != currentTotalLength[0]) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_SMALL, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); } else { sink.complete(); } return; } currentTotalLength[0] += buffer.remaining(); if (currentTotalLength[0] > expectedLength) { sink.error(new UnexpectedLengthException(String.format(BODY_TOO_LARGE, currentTotalLength[0], expectedLength), currentTotalLength[0], expectedLength)); return; } sink.next(buffer); }); }); } /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. 
 */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. * * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = 
 methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData.toFluxByteBuffer()); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, SerializerEncoding.JSON, stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { ByteArrayOutputStream stream = new AccessibleByteArrayOutputStream(); serializer.serialize(bodyContentObject, 
 SerializerEncoding.fromHeaders(request.getHeaders()), stream); request.setHeader("Content-Length", String.valueOf(stream.size())); request.setBody(Flux.defer(() -> Flux.just(ByteBuffer.wrap(stream.toByteArray(), 0, stream.size())))); } } return request; } private Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse .flatMap(decodedHttpResponse -> ensureExpectedStatus(decodedHttpResponse, methodParser, options)); } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param decodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @return An async-version of the provided decodedResponse. 
 */ private Mono<HttpDecodedResponse> ensureExpectedStatus(final HttpDecodedResponse decodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { final int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && options.getErrorOptions().contains(ErrorOptions.NO_THROW))) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null)); })) .flatMap(responseBytes -> decodedResponse.getDecodedBody(responseBytes) .switchIfEmpty(Mono.defer(() -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, null)); })) .flatMap(decodedBody -> { return Mono.error(instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), responseBytes, decodedBody)); })); } private Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { if (entityType.equals(StreamResponse.class)) { return createResponse(response, entityType, null); } final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(createResponse(response, entityType, null)); } else { return handleBodyReturnType(response, methodParser, bodyType) .flatMap(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.defer((Supplier<Mono<Response<?>>>) () -> createResponse(response, entityType, null))); } } else { 
 // Bare (non-Response) return types: deserialize only the body.
 return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings("unchecked") private Mono<Response<?>> createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); final HttpResponse httpResponse = response.getSourceResponse(); final HttpRequest request = httpResponse.getRequest(); final int statusCode = httpResponse.getStatusCode(); final HttpHeaders headers = httpResponse.getHeaders(); final Object decodedHeaders = response.getDecodedHeaders(); if (cls.equals(Response.class)) { return Mono.defer(() -> Mono.just(cls.cast(new ResponseBase<>(request, statusCode, headers, bodyAsObject, decodedHeaders)))); } else if (cls.equals(PagedResponse.class)) { return Mono.create(sink -> { if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { sink.error(LOGGER.logExceptionAsError(new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR))); } else if (bodyAsObject == null) { sink.success(cls.cast(new PagedResponseBase<>(request, statusCode, headers, null, null, decodedHeaders))); } else { sink.success(cls.cast(new PagedResponseBase<>(request, statusCode, headers, (Page<?>) bodyAsObject, decodedHeaders))); } }); } else if (cls.equals(StreamResponse.class)) { return Mono.just(new StreamResponse(request, httpResponse)); } return Mono.just(RESPONSE_CONSTRUCTORS_CACHE.get(cls)) .switchIfEmpty(Mono.defer(() -> Mono.error(new RuntimeException("Cannot find suitable constructor for class " + cls)))) .flatMap(ctr -> RESPONSE_CONSTRUCTORS_CACHE.invoke(ctr, response, bodyAsObject)); } private Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = 
 methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf( entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { if (methodParser.getReturnType().equals(StreamResponse.class)) { asyncResult = Mono.empty(); } else { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } } else { asyncResult = response.getDecodedBody((byte[]) null); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
 * @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = 
 httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, createDefaultPipeline(), createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. 
 * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
// End of RestProxy (variant A: eager serialization into byte streams, reactive createResponse).
// Second snapshot of RestProxy (refactored variant): body-length validation and unexpected-
// exception instantiation are delegated to RestProxyUtils, createResponse builds Response
// objects directly (non-reactive) via a MethodHandle cache, and serialization uses
// serializeToBytes instead of streaming into an AccessibleByteArrayOutputStream.
// NOTE(review): as in the variant above, the invoke(...) body after "@Override" is missing in
// this view, and the path.contains(": string literal is truncated — confirm against the
// original file. Formatting is collapsed; code is kept byte-identical.
class RestProxy implements InvocationHandler { private static final String MUST_IMPLEMENT_PAGE_ERROR = "Unable to create PagedResponse<T>. Body must be of a type that implements: " + Page.class; private static final ResponseConstructorsCache RESPONSE_CONSTRUCTORS_CACHE = new ResponseConstructorsCache(); private static final ClientLogger LOGGER = new ClientLogger(RestProxy.class); private final HttpPipeline httpPipeline; private final SerializerAdapter serializer; private final SwaggerInterfaceParser interfaceParser; private final HttpResponseDecoder decoder; /** * Create a RestProxy. * * @param httpPipeline the HttpPipelinePolicy and HttpClient httpPipeline that will be used to send HTTP requests. * @param serializer the serializer that will be used to convert response bodies to POJOs. * @param interfaceParser the parser that contains information about the interface describing REST API methods that * this RestProxy "implements". */ private RestProxy(HttpPipeline httpPipeline, SerializerAdapter serializer, SwaggerInterfaceParser interfaceParser) { this.httpPipeline = httpPipeline; this.serializer = serializer; this.interfaceParser = interfaceParser; this.decoder = new HttpResponseDecoder(this.serializer); } /** * Get the SwaggerMethodParser for the provided method. The Method must exist on the Swagger interface that this * RestProxy was created to "implement". * * @param method the method to get a SwaggerMethodParser for * @return the SwaggerMethodParser for the provided method */ private SwaggerMethodParser getMethodParser(Method method) { return interfaceParser.getMethodParser(method); } /** * Send the provided request asynchronously, applying any request policies provided to the HttpClient instance. 
 * * @param request the HTTP request to send * @param contextData the context * @return a {@link Mono} that emits HttpResponse asynchronously */ public Mono<HttpResponse> send(HttpRequest request, Context contextData) { return httpPipeline.send(request, contextData); } @Override /** * Starts the tracing span for the current service call, additionally set metadata attributes on the span by passing * additional context information. * * @param method Service method being called. * @param context Context information about the current service call. * @return The updated context containing the span context. */ private Context startTracingSpan(Method method, Context context) { if (!TracerProxy.isTracingEnabled()) { return context; } if ((boolean) context.getData(Tracer.DISABLE_TRACING_KEY).orElse(false)) { return context; } String spanName = interfaceParser.getServiceName() + "." + method.getName(); context = TracerProxy.setSpanName(spanName, context); return TracerProxy.start(spanName, context); } /** * Create a HttpRequest for the provided Swagger method using the provided arguments. 
 * * @param methodParser the Swagger method parser to use * @param args the arguments to use to populate the method's annotation values * @return a HttpRequest * @throws IOException thrown if the body contents cannot be serialized */ private HttpRequest createHttpRequest(SwaggerMethodParser methodParser, Object[] args) throws IOException { final String path = methodParser.setPath(args); final UrlBuilder pathUrlBuilder = UrlBuilder.parse(path); final UrlBuilder urlBuilder; if (pathUrlBuilder.getScheme() != null) { urlBuilder = pathUrlBuilder; } else { urlBuilder = new UrlBuilder(); methodParser.setSchemeAndHost(args, urlBuilder); if (path != null && !path.isEmpty() && !"/".equals(path)) { String hostPath = urlBuilder.getPath(); if (hostPath == null || hostPath.isEmpty() || "/".equals(hostPath) || path.contains(": urlBuilder.setPath(path); } else { if (path.startsWith("/")) { urlBuilder.setPath(hostPath + path); } else { urlBuilder.setPath(hostPath + "/" + path); } } } } methodParser.setEncodedQueryParameters(args, urlBuilder); final URL url = urlBuilder.toUrl(); final HttpRequest request = configRequest(new HttpRequest(methodParser.getHttpMethod(), url), methodParser, args); HttpHeaders httpHeaders = request.getHeaders(); methodParser.setHeaders(args, httpHeaders); return request; } @SuppressWarnings("unchecked") private HttpRequest configRequest(final HttpRequest request, final SwaggerMethodParser methodParser, final Object[] args) throws IOException { final Object bodyContentObject = methodParser.setBody(args); if (bodyContentObject == null) { request.getHeaders().set("Content-Length", "0"); } else { String contentType = methodParser.getBodyContentType(); if (contentType == null || contentType.isEmpty()) { if (bodyContentObject instanceof byte[] || bodyContentObject instanceof String) { contentType = ContentType.APPLICATION_OCTET_STREAM; } else { contentType = ContentType.APPLICATION_JSON; } } request.getHeaders().set("Content-Type", contentType); if 
 (bodyContentObject instanceof BinaryData) { BinaryData binaryData = (BinaryData) bodyContentObject; if (binaryData.getLength() != null) { request.setHeader("Content-Length", binaryData.getLength().toString()); } request.setBody(binaryData); return request; } boolean isJson = false; final String[] contentTypeParts = contentType.split(";"); for (final String contentTypePart : contentTypeParts) { if (contentTypePart.trim().equalsIgnoreCase(ContentType.APPLICATION_JSON)) { isJson = true; break; } } if (isJson) { request.setBody(serializer.serializeToBytes(bodyContentObject, SerializerEncoding.JSON)); } else if (FluxUtil.isFluxByteBuffer(methodParser.getBodyJavaType())) { request.setBody((Flux<ByteBuffer>) bodyContentObject); } else if (bodyContentObject instanceof byte[]) { request.setBody((byte[]) bodyContentObject); } else if (bodyContentObject instanceof String) { final String bodyContentString = (String) bodyContentObject; if (!bodyContentString.isEmpty()) { request.setBody(bodyContentString); } } else if (bodyContentObject instanceof ByteBuffer) { request.setBody(Flux.just((ByteBuffer) bodyContentObject)); } else { request.setBody(serializer.serializeToBytes(bodyContentObject, SerializerEncoding.fromHeaders(request.getHeaders()))); } } return request; } /** * Create a publisher that (1) emits error if the provided response {@code decodedResponse} has 'disallowed status * code' OR (2) emits provided response if it's status code ia allowed. * * 'disallowed status code' is one of the status code defined in the provided SwaggerMethodParser or is in the int[] * of additional allowed status codes. * * @param asyncDecodedResponse The HttpResponse to check. * @param methodParser The method parser that contains information about the service interface method that initiated * the HTTP request. * @param options Additional options passed as part of the request. * @return An async-version of the provided decodedResponse. 
 */ private static Mono<HttpDecodedResponse> ensureExpectedStatus(final Mono<HttpDecodedResponse> asyncDecodedResponse, final SwaggerMethodParser methodParser, RequestOptions options) { return asyncDecodedResponse.flatMap(decodedResponse -> { int responseStatusCode = decodedResponse.getSourceResponse().getStatusCode(); if (methodParser.isExpectedResponseStatusCode(responseStatusCode) || (options != null && options.getErrorOptions().contains(ErrorOptions.NO_THROW))) { return Mono.just(decodedResponse); } return decodedResponse.getSourceResponse().getBodyAsByteArray() .map(bytes -> RestProxyUtils.instantiateUnexpectedException(methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), bytes, decodedResponse.getDecodedBody(bytes))) .switchIfEmpty(Mono.fromSupplier(() -> RestProxyUtils.instantiateUnexpectedException( methodParser.getUnexpectedException(responseStatusCode), decodedResponse.getSourceResponse(), null, null))) .flatMap(Mono::error); }); } private static Mono<?> handleRestResponseReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { if (TypeUtil.isTypeOrSubTypeOf(entityType, Response.class)) { if (entityType.equals(StreamResponse.class)) { return Mono.fromCallable(() -> createResponse(response, entityType, null)); } final Type bodyType = TypeUtil.getRestResponseBodyType(entityType); if (TypeUtil.isTypeOrSubTypeOf(bodyType, Void.class)) { return response.getSourceResponse().getBody().ignoreElements() .then(Mono.fromCallable(() -> createResponse(response, entityType, null))); } else { return handleBodyReturnType(response, methodParser, bodyType) .map(bodyAsObject -> createResponse(response, entityType, bodyAsObject)) .switchIfEmpty(Mono.fromCallable(() -> createResponse(response, entityType, null))); } } else { return handleBodyReturnType(response, methodParser, entityType); } } @SuppressWarnings({"unchecked", "rawtypes"}) private static Response 
 createResponse(HttpDecodedResponse response, Type entityType, Object bodyAsObject) { final Class<? extends Response<?>> cls = (Class<? extends Response<?>>) TypeUtil.getRawClass(entityType); final HttpResponse httpResponse = response.getSourceResponse(); final HttpRequest request = httpResponse.getRequest(); final int statusCode = httpResponse.getStatusCode(); final HttpHeaders headers = httpResponse.getHeaders(); final Object decodedHeaders = response.getDecodedHeaders(); if (cls.equals(Response.class)) { return cls.cast(new ResponseBase<>(request, statusCode, headers, bodyAsObject, decodedHeaders)); } else if (cls.equals(PagedResponse.class)) { if (bodyAsObject != null && !TypeUtil.isTypeOrSubTypeOf(bodyAsObject.getClass(), Page.class)) { throw LOGGER.logExceptionAsError(new RuntimeException(MUST_IMPLEMENT_PAGE_ERROR)); } else if (bodyAsObject == null) { return cls.cast(new PagedResponseBase<>(request, statusCode, headers, null, null, decodedHeaders)); } else { return cls.cast(new PagedResponseBase<>(request, statusCode, headers, (Page<?>) bodyAsObject, decodedHeaders)); } } else if (cls.equals(StreamResponse.class)) { return new StreamResponse(request, httpResponse); } MethodHandle constructorHandle = RESPONSE_CONSTRUCTORS_CACHE.get(cls); return RESPONSE_CONSTRUCTORS_CACHE.invoke(constructorHandle, response, bodyAsObject); } private static Mono<?> handleBodyReturnType(final HttpDecodedResponse response, final SwaggerMethodParser methodParser, final Type entityType) { final int responseStatusCode = response.getSourceResponse().getStatusCode(); final HttpMethod httpMethod = methodParser.getHttpMethod(); final Type returnValueWireType = methodParser.getReturnValueWireType(); final Mono<?> asyncResult; if (httpMethod == HttpMethod.HEAD && (TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.TYPE) || TypeUtil.isTypeOrSubTypeOf(entityType, Boolean.class))) { boolean isSuccess = (responseStatusCode / 100) == 2; asyncResult = Mono.just(isSuccess); } else if 
 (TypeUtil.isTypeOrSubTypeOf(entityType, byte[].class)) { Mono<byte[]> responseBodyBytesAsync = response.getSourceResponse().getBodyAsByteArray(); if (returnValueWireType == Base64Url.class) { responseBodyBytesAsync = responseBodyBytesAsync .mapNotNull(base64UrlBytes -> new Base64Url(base64UrlBytes).decodedBytes()); } asyncResult = responseBodyBytesAsync; } else if (FluxUtil.isFluxByteBuffer(entityType)) { asyncResult = Mono.just(response.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(entityType, BinaryData.class)) { if (methodParser.getReturnType().equals(StreamResponse.class)) { asyncResult = Mono.empty(); } else { asyncResult = BinaryData.fromFlux(response.getSourceResponse().getBody()); } } else { asyncResult = response.getSourceResponse().getBodyAsByteArray().mapNotNull(response::getDecodedBody); } return asyncResult; } /** * Handle the provided asynchronous HTTP response and return the deserialized value. * * @param asyncHttpDecodedResponse the asynchronous HTTP response to the original HTTP request * @param methodParser the SwaggerMethodParser that the request originates from * @param returnType the type of value that will be returned * @param context Additional context that is passed through the Http pipeline during the service call. 
 * @return the deserialized result */ private Object handleRestReturnType(final Mono<HttpDecodedResponse> asyncHttpDecodedResponse, final SwaggerMethodParser methodParser, final Type returnType, final Context context, final RequestOptions options) { final Mono<HttpDecodedResponse> asyncExpectedResponse = ensureExpectedStatus(asyncHttpDecodedResponse, methodParser, options) .doOnEach(RestProxy::endTracingSpan) .contextWrite(reactor.util.context.Context.of("TRACING_CONTEXT", context)); final Object result; if (TypeUtil.isTypeOrSubTypeOf(returnType, Mono.class)) { final Type monoTypeParam = TypeUtil.getTypeArgument(returnType); if (TypeUtil.isTypeOrSubTypeOf(monoTypeParam, Void.class)) { result = asyncExpectedResponse.then(); } else { result = asyncExpectedResponse.flatMap(response -> handleRestResponseReturnType(response, methodParser, monoTypeParam)); } } else if (FluxUtil.isFluxByteBuffer(returnType)) { result = asyncExpectedResponse.flatMapMany(ar -> ar.getSourceResponse().getBody()); } else if (TypeUtil.isTypeOrSubTypeOf(returnType, void.class) || TypeUtil.isTypeOrSubTypeOf(returnType, Void.class)) { asyncExpectedResponse.block(); result = null; } else { result = asyncExpectedResponse .flatMap(httpResponse -> handleRestResponseReturnType(httpResponse, methodParser, returnType)) .block(); } return result; } private static void endTracingSpan(Signal<HttpDecodedResponse> signal) { if (!TracerProxy.isTracingEnabled()) { return; } if (signal.isOnComplete() || signal.isOnSubscribe()) { return; } ContextView context = signal.getContextView(); Optional<Context> tracingContext = context.getOrEmpty("TRACING_CONTEXT"); boolean disableTracing = Boolean.TRUE.equals(context.getOrDefault(Tracer.DISABLE_TRACING_KEY, false)); if (!tracingContext.isPresent() || disableTracing) { return; } int statusCode = 0; HttpDecodedResponse httpDecodedResponse; Throwable throwable = null; if (signal.hasValue()) { httpDecodedResponse = signal.get(); statusCode = 
 httpDecodedResponse.getSourceResponse().getStatusCode(); } else if (signal.hasError()) { throwable = signal.getThrowable(); if (throwable instanceof HttpResponseException) { HttpResponseException exception = (HttpResponseException) throwable; statusCode = exception.getResponse().getStatusCode(); } } TracerProxy.end(statusCode, throwable, tracingContext.get()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface) { return create(swaggerInterface, RestProxyUtils.createDefaultPipeline(), RestProxyUtils.createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipeline that will be used to send Http requests * @param <A> the type of the Swagger interface * @return a proxy implementation of the provided Swagger interface */ public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline) { return create(swaggerInterface, httpPipeline, RestProxyUtils.createDefaultSerializer()); } /** * Create a proxy implementation of the provided Swagger interface. * * @param swaggerInterface the Swagger interface to provide a proxy implementation for * @param httpPipeline the HttpPipelinePolicy and HttpClient pipline that will be used to send Http requests * @param serializer the serializer that will be used to convert POJOs to and from request and response bodies * @param <A> the type of the Swagger interface. 
 * @return a proxy implementation of the provided Swagger interface */ @SuppressWarnings("unchecked") public static <A> A create(Class<A> swaggerInterface, HttpPipeline httpPipeline, SerializerAdapter serializer) { final SwaggerInterfaceParser interfaceParser = new SwaggerInterfaceParser(swaggerInterface, serializer); final RestProxy restProxy = new RestProxy(httpPipeline, serializer, interfaceParser); return (A) Proxy.newProxyInstance(swaggerInterface.getClassLoader(), new Class<?>[]{swaggerInterface}, restProxy); } }
// End of RestProxy (variant B: RestProxyUtils-based validation/exception creation).
Manually authored code follows; the preceding blocks are generated/shared client infrastructure.
/**
 * End-to-end CRUD exercise for Device Provisioning Service (DPS) certificates:
 * creates a certificate on a freshly provisioned service, lists and validates it,
 * generates a verification code for it, deletes it, and confirms the certificate
 * list is empty again. The resource group is deleted in {@code finally} so Azure
 * resources are cleaned up even when an assertion fails.
 */
public void certificateCRUD() {
    ResourceManager resourceManager = createResourceManager();
    IotDpsManager iotDpsManager = createIotDpsManager();
    ResourceGroup resourceGroup = createResourceGroup(resourceManager);
    try {
        // Provision a DPS instance inside the fresh resource group.
        ProvisioningServiceDescriptionInner provisioningServiceDescription =
            createProvisioningService(iotDpsManager, resourceGroup);

        // Build the certificate payload from the certificate content held in test constants.
        CertificateResponseInner certificateInner = new CertificateResponseInner()
            .withProperties(new CertificateProperties()
                .withCertificate(Constants.Certificate.CONTENT.getBytes(StandardCharsets.UTF_8)));

        // CREATE: upload the certificate to the provisioning service.
        iotDpsManager
            .serviceClient()
            .getDpsCertificates()
            .createOrUpdate(
                resourceGroup.name(),
                provisioningServiceDescription.name(),
                Constants.Certificate.NAME,
                certificateInner);

        // READ: exactly one certificate should exist, unverified, matching the test constants.
        CertificateListDescriptionInner certificateListDescription =
            iotDpsManager
                .serviceClient()
                .getDpsCertificates()
                .list(
                    resourceGroup.name(),
                    provisioningServiceDescription.name());
        assertEquals(1, certificateListDescription.value().size());
        CertificateResponseInner certificate = certificateListDescription.value().get(0);
        assertFalse(certificate.properties().isVerified());
        assertEquals(Constants.Certificate.SUBJECT, certificate.properties().subject());
        assertEquals(Constants.Certificate.THUMBPRINT, certificate.properties().thumbprint());

        // Proof-of-possession flow: request a verification code for the certificate.
        // NOTE(review): argument order here (certificateName, etag, resourceGroup, serviceName)
        // differs from the createOrUpdate/list calls above — presumably it follows the generated
        // client signature; TODO confirm against the service client.
        VerificationCodeResponseInner verificationCodeResponse =
            iotDpsManager
                .serviceClient()
                .getDpsCertificates()
                .generateVerificationCode(
                    certificate.name(),
                    certificate.etag(),
                    resourceGroup.name(),
                    provisioningServiceDescription.name());
        assertNotNull(verificationCodeResponse.properties().verificationCode());

        // DELETE: remove the certificate, passing the etag returned by code generation.
        // NOTE(review): argument order (resourceGroup, etag, serviceName, certificateName) —
        // verify against the generated delete signature.
        iotDpsManager
            .serviceClient()
            .getDpsCertificates()
            .delete(
                resourceGroup.name(),
                verificationCodeResponse.etag(),
                provisioningServiceDescription.name(),
                certificate.name());

        // Verify the deletion: the certificate list should be empty again.
        certificateListDescription =
            iotDpsManager
                .serviceClient()
                .getDpsCertificates()
                .list(
                    resourceGroup.name(),
                    provisioningServiceDescription.name());
        assertEquals(0, certificateListDescription.value().size());
    } finally {
        // Always tear down the resource group so the test leaves no Azure resources behind.
        deleteResourceGroup(resourceManager, resourceGroup);
    }
}
CertificateResponseInner certificateInner = new CertificateResponseInner()
/**
 * End-to-end CRUD exercise for Device Provisioning Service (DPS) certificates:
 * creates a certificate on a freshly provisioned service, lists and validates it,
 * generates a verification code for it, deletes it, and confirms the certificate
 * list is empty again. The resource group is deleted in {@code finally} so Azure
 * resources are cleaned up even when an assertion fails.
 */
public void certificateCRUD() {
    ResourceManager resourceManager = createResourceManager();
    IotDpsManager iotDpsManager = createIotDpsManager();
    ResourceGroup resourceGroup = createResourceGroup(resourceManager);
    try {
        // Provision a DPS instance inside the fresh resource group.
        ProvisioningServiceDescriptionInner provisioningServiceDescription =
            createProvisioningService(iotDpsManager, resourceGroup);

        // Build the certificate payload from the certificate content held in test constants.
        CertificateResponseInner certificateInner = new CertificateResponseInner()
            .withProperties(new CertificateProperties()
                .withCertificate(Constants.Certificate.CONTENT.getBytes(StandardCharsets.UTF_8)));

        // CREATE: upload the certificate to the provisioning service.
        iotDpsManager
            .serviceClient()
            .getDpsCertificates()
            .createOrUpdate(
                resourceGroup.name(),
                provisioningServiceDescription.name(),
                Constants.Certificate.NAME,
                certificateInner);

        // READ: exactly one certificate should exist, unverified, matching the test constants.
        CertificateListDescriptionInner certificateListDescription =
            iotDpsManager
                .serviceClient()
                .getDpsCertificates()
                .list(
                    resourceGroup.name(),
                    provisioningServiceDescription.name());
        assertEquals(1, certificateListDescription.value().size());
        CertificateResponseInner certificate = certificateListDescription.value().get(0);
        assertFalse(certificate.properties().isVerified());
        assertEquals(Constants.Certificate.SUBJECT, certificate.properties().subject());
        assertEquals(Constants.Certificate.THUMBPRINT, certificate.properties().thumbprint());

        // Proof-of-possession flow: request a verification code for the certificate.
        // NOTE(review): argument order here (certificateName, etag, resourceGroup, serviceName)
        // differs from the createOrUpdate/list calls above — presumably it follows the generated
        // client signature; TODO confirm against the service client.
        VerificationCodeResponseInner verificationCodeResponse =
            iotDpsManager
                .serviceClient()
                .getDpsCertificates()
                .generateVerificationCode(
                    certificate.name(),
                    certificate.etag(),
                    resourceGroup.name(),
                    provisioningServiceDescription.name());
        assertNotNull(verificationCodeResponse.properties().verificationCode());

        // DELETE: remove the certificate, passing the etag returned by code generation.
        // NOTE(review): argument order (resourceGroup, etag, serviceName, certificateName) —
        // verify against the generated delete signature.
        iotDpsManager
            .serviceClient()
            .getDpsCertificates()
            .delete(
                resourceGroup.name(),
                verificationCodeResponse.etag(),
                provisioningServiceDescription.name(),
                certificate.name());

        // Verify the deletion: the certificate list should be empty again.
        certificateListDescription =
            iotDpsManager
                .serviceClient()
                .getDpsCertificates()
                .list(
                    resourceGroup.name(),
                    provisioningServiceDescription.name());
        assertEquals(0, certificateListDescription.value().size());
    } finally {
        // Always tear down the resource group so the test leaves no Azure resources behind.
        deleteResourceGroup(resourceManager, resourceGroup);
    }
}
// NOTE(review): skeleton only — the body of the @Test method annotated below was elided
// from this extract, which is why the annotations dangle before the closing brace.
// TODO confirm against the complete source file.
class CertificatesTests extends DeviceProvisioningTestBase {
    @Test
    @DoNotRecord(skipInPlayback = true)
}
// NOTE(review): duplicate copy of the same skeleton, preserved verbatim.
class CertificatesTests extends DeviceProvisioningTestBase {
    @Test
    @DoNotRecord(skipInPlayback = true)
}
If the application has no activity of its own accord, would that translate into an unhealthy report?
/**
 * Determines whether {@code channel} is healthy and, when it is not, reports the reason.
 * <p>
 * The checks run in order: (1) a recent read short-circuits to success; (2) a nonresponding
 * write (attempts outpacing completions beyond the configured limit); (3) a nonresponding
 * read (writes outpacing reads beyond the configured limit); (4) idle-connection timeout;
 * (5) finally an RNTBD health-check probe is written and the result depends on whether the
 * flush succeeds.
 *
 * @param channel the channel whose health is to be checked.
 * @return a future that always completes successfully; its value is
 * {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue} when the channel is healthy,
 * or a human-readable failure reason otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {
    checkNotNull(channel, "expected non-null channel");
    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();
    if (requestManager == null) {
        // An active channel without a request manager is an anomaly worth reporting.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }
    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();
    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        // A read happened very recently: the channel is demonstrably alive; skip all other checks.
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }
    // Write-hang detection: gap between the last write attempt and the last completed write.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();
    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
            + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
            + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        // The returned reason mirrors the logged warning.
        // NOTE(review): MessageFormat applies locale-dependent grouping to numeric arguments
        // (e.g. "1,234,567") — confirm this is acceptable for the failure-reason string.
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount
        );
        return promise.setSuccess(msg);
    }
    // Read-hang detection: gap between the last completed write and the last read.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();
    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
            + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount
        );
        return promise.setSuccess(msg);
    }
    // Idle-connection check — only enabled when the configured timeout is positive.
    // Idleness is measured from the last channel read only.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // NOTE(review): this message opens a "(" but never closes it after "{4}" —
            // cosmetic inconsistency with the other failure reasons; confirm intended.
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime
            );
            return promise.setSuccess(msg);
        }
    }
    // All passive checks passed: actively probe the channel with an RNTBD health-check write.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString()
            );
            promise.setSuccess(msg);
        }
    });
    return promise;
}
final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
/**
 * Determines whether {@code channel} is healthy and, when it is not, reports the reason.
 * <p>
 * The checks run in order: (1) a recent read short-circuits to success; (2) a nonresponding
 * write (attempts outpacing completions beyond the configured limit); (3) a nonresponding
 * read (writes outpacing reads beyond the configured limit); (4) idle-connection timeout;
 * (5) finally an RNTBD health-check probe is written and the result depends on whether the
 * flush succeeds.
 *
 * @param channel the channel whose health is to be checked.
 * @return a future that always completes successfully; its value is
 * {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue} when the channel is healthy,
 * or a human-readable failure reason otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {
    checkNotNull(channel, "expected non-null channel");
    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();
    if (requestManager == null) {
        // An active channel without a request manager is an anomaly worth reporting.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }
    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();
    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        // A read happened very recently: the channel is demonstrably alive; skip all other checks.
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }
    // Write-hang detection: gap between the last write attempt and the last completed write.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();
    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
            + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
            + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        // The returned reason mirrors the logged warning.
        // NOTE(review): MessageFormat applies locale-dependent grouping to numeric arguments
        // (e.g. "1,234,567") — confirm this is acceptable for the failure-reason string.
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount
        );
        return promise.setSuccess(msg);
    }
    // Read-hang detection: gap between the last completed write and the last read.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();
    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
            + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount
        );
        return promise.setSuccess(msg);
    }
    // Idle-connection check — only enabled when the configured timeout is positive.
    // Idleness is measured from the last channel read only.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // NOTE(review): this message opens a "(" but never closes it after "{4}" —
            // cosmetic inconsistency with the other failure reasons; confirm intended.
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime
            );
            return promise.setSuccess(msg);
        }
    }
    // All passive checks passed: actively probe the channel with an RNTBD health-check write.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString()
            );
            promise.setSuccess(msg);
        }
    });
    return promise;
}
/**
 * Netty {@code ChannelHealthChecker} for RNTBD channels.
 * <p>
 * A channel that has read data within the last second is declared healthy immediately.
 * Otherwise it is inspected for nonresponding writes (write attempts outpacing write
 * completions), nonresponding reads (writes outpacing reads), and — when configured —
 * idle-connection timeout, before finally being probed with an RNTBD health-check request.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A read within this window (1 s) proves liveness and short-circuits every other check.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods that keep transient stalls from tripping the hang checks.
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Initializes a new health checker from {@code config}.
     * <p>
     * The configured hang-detection times must exceed the corresponding grace periods,
     * otherwise the hang checks could never fire meaningfully.
     *
     * @param config the RNTBD endpoint configuration; must be non-null.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s",
            config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s",
            config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle — and hence unhealthy — when the time elapsed since the last
     * channel read exceeds this value. A value of zero or less disables the idle check.
     *
     * @return idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     *
     * @return read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     *
     * @return write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel a channel whose health is to be checked.
     * @return a future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel without a request manager is an anomaly worth reporting.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A recent read proves the channel is alive; skip the remaining checks.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Write-hang detection: gap between the last write attempt and the last completed write.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Read-hang detection: gap between the last completed write and the last read.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle-connection check, enabled only when the configured timeout is positive.
        // Idleness is measured from the last channel read only.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // All passive checks passed: actively probe the channel with an RNTBD health-check write.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Thread-safe snapshot of channel activity times, updated via atomic field updaters.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        /** Records the time at which a channel ping completed. */
        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        /** Records the time at which a channel read completed. */
        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        /** Records the time at which a channel write was attempted. */
        public void channelWriteAttempted() {
            // FIX: previously updated lastWriteNanoTime here (and lastWriteAttemptNanoTime in
            // channelWriteCompleted), swapping the two timestamps. That inverted the sign of
            // writeDelayInNanos in isHealthy's write-hang check.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        /** Records the time at which a channel write completed. */
        public void channelWriteCompleted() {
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
/**
 * Netty {@code ChannelHealthChecker} for RNTBD channels.
 * <p>
 * A channel that has read data within the last second is declared healthy immediately.
 * Otherwise it is inspected for nonresponding writes (write attempts outpacing write
 * completions), nonresponding reads (writes outpacing reads), and — when configured —
 * idle-connection timeout, before finally being probed with an RNTBD health-check request.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A read within this window (1 s) proves liveness and short-circuits every other check.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods that keep transient stalls from tripping the hang checks.
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Initializes a new health checker from {@code config}.
     * <p>
     * The configured hang-detection times must exceed the corresponding grace periods,
     * otherwise the hang checks could never fire meaningfully.
     *
     * @param config the RNTBD endpoint configuration; must be non-null.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s",
            config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s",
            config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle — and hence unhealthy — when the time elapsed since the last
     * channel read exceeds this value. A value of zero or less disables the idle check.
     *
     * @return idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     *
     * @return read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     *
     * @return write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel a channel whose health is to be checked.
     * @return a future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel without a request manager is an anomaly worth reporting.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A recent read proves the channel is alive; skip the remaining checks.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Write-hang detection: gap between the last write attempt and the last completed write.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Read-hang detection: gap between the last completed write and the last read.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle-connection check, enabled only when the configured timeout is positive.
        // Idleness is measured from the last channel read only.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // All passive checks passed: actively probe the channel with an RNTBD health-check write.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Thread-safe snapshot of channel activity times, updated via atomic field updaters.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        /** Records the time at which a channel ping completed. */
        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        /** Records the time at which a channel read completed. */
        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        /** Records the time at which a channel write was attempted. */
        public void channelWriteAttempted() {
            // FIX: previously updated lastWriteNanoTime here (and lastWriteAttemptNanoTime in
            // channelWriteCompleted), swapping the two timestamps. That inverted the sign of
            // writeDelayInNanos in isHealthy's write-hang check.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        /** Records the time at which a channel write completed. */
        public void channelWriteCompleted() {
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
Is the write timestamp intentionally excluded from the idleness determination? The idle-connection check considers only the time since the last channel read — should a recent write also count as activity?
/**
 * Determines whether a specified channel is healthy and, when it is not, reports the reason.
 * <p>
 * A channel with a read completed within the recent-read window is healthy without further checks.
 * Otherwise the channel is probed for a nonresponding write (a write attempt outstanding past the
 * write-delay limit and grace period), a nonresponding read (a completed write whose response has
 * not arrived within the read-delay limit and grace period), and an exceeded idle-connection
 * timeout. If none of those checks fail, a ping is written to the channel and health is decided by
 * the outcome of that write.
 *
 * @param channel a channel whose health is to be checked.
 * @return a future completing with {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue}
 * when the channel is healthy, or a message describing the failure otherwise. The future is never
 * failed; failures are reported through the message value.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {

    checkNotNull(channel, "expected non-null channel");

    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();

    if (requestManager == null) {
        // An active channel without a request manager indicates a pipeline setup problem.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }

    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();

    // A recent read is sufficient evidence of health; skip the remaining checks.
    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }

    // Nonresponding write: the latest write attempt has not completed within the delay limit
    // and the grace period has elapsed since that attempt.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        return promise.setSuccess(msg);
    }

    // Nonresponding read: the last completed write has gone unanswered past the delay limit
    // and the grace period has elapsed since that write.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        return promise.setSuccess(msg);
    }

    // Idle-connection check, enabled only when the timeout is positive.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // FIX: the message previously ended "currentTime: {4}" without the closing
            // parenthesis that balances the "(" opened after "timeout:".
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4})",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime);
            return promise.setSuccess(msg);
        }
    }

    // Last resort: write a ping and decide health from the outcome of the write.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            // SLF4J treats the trailing Throwable specially, so the single "{}" is intended.
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString());
            promise.setSuccess(msg);
        }
    });

    return promise;
}
if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
/**
 * Determines whether a specified channel is healthy and, when it is not, reports the reason.
 * <p>
 * A channel with a read completed within the recent-read window is healthy without further checks.
 * Otherwise the channel is probed for a nonresponding write, a nonresponding read, and an exceeded
 * idle-connection timeout. If none of those checks fail, a ping is written to the channel and
 * health is decided by the outcome of that write.
 *
 * @param channel a channel whose health is to be checked.
 * @return a future completing with {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue}
 * when the channel is healthy, or a message describing the failure otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {

    checkNotNull(channel, "expected non-null channel");

    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();

    if (requestManager == null) {
        // An active channel without a request manager indicates a pipeline setup problem.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }

    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();

    // A read completed within the recent-read window is taken as proof of health.
    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }

    // Nonresponding write check: a write attempt outstanding longer than the write-delay limit,
    // once the grace period has also elapsed, marks the channel unhealthy.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        return promise.setSuccess(msg);
    }

    // Nonresponding read check: the last completed write has gone unanswered past the
    // read-delay limit and the grace period has elapsed since that write.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        return promise.setSuccess(msg);
    }

    // Idle-connection check, enabled only when the timeout is positive.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // NOTE(review): this message opens "(" but ends "currentTime: {4}" without the
            // matching ")" — the closing parenthesis appears to be missing.
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime);
            return promise.setSuccess(msg);
        }
    }

    // Last resort: write a ping and decide health from the outcome of the write.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            // SLF4J treats the trailing Throwable specially, so the single "{}" is intended.
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString());
            promise.setSuccess(msg);
        }
    });

    return promise;
}
/**
 * Implements health checks for channels used by the RNTBD transport client.
 * <p>
 * Health is decided from atomically maintained activity timestamps ({@link Timestamps}): recent
 * reads prove health; nonresponding writes, nonresponding reads, and idle-connection timeouts
 * prove failure; otherwise a ping write decides the result.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel with a read completed within this window is considered healthy without further checks.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods prevent transient scheduling delays from being misread as hangs.
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker configured from {@code config}.
     *
     * @param config the transport client configuration; its hang-detection times must exceed the
     * corresponding grace periods.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s",
            config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s",
            config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when this value is positive and the time since the last channel
     * read exceeds it. A non-positive value disables the idle-connection check.
     *
     * @return Idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     *
     * @return Read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     *
     * @return Write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel A channel whose health is to be checked.
     * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel without a request manager indicates a pipeline setup problem.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        // A recent read is sufficient evidence of health; skip the remaining checks.
        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            return promise.setSuccess(Boolean.TRUE);
        }

        // Nonresponding write: the latest write attempt has not completed within the delay limit
        // and the grace period has elapsed since that attempt.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Nonresponding read: the last completed write has gone unanswered past the delay limit
        // and the grace period has elapsed since that write.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle-connection check, enabled only when the timeout is positive.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // Last resort: write a ping and decide health from the outcome of the write.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                // SLF4J treats the trailing Throwable specially, so the single "{}" is intended.
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Channel activity timestamps, updated atomically by the request pipeline and snapshotted by
     * the health checker via the copy constructor.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        // FIX: previously updated lastWriteNanoTime, which inverted the sign of
        // writeDelayInNanos (lastWriteAttempt - lastWrite) computed by the health checks.
        public void channelWriteAttempted() {
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        // FIX: previously updated lastWriteAttemptNanoTime; a completed write must
        // advance lastWriteNanoTime.
        public void channelWriteCompleted() {
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
/**
 * Implements health checks for channels used by the RNTBD transport client.
 * <p>
 * Health is decided from atomically maintained activity timestamps ({@link Timestamps}): recent
 * reads prove health; nonresponding writes, nonresponding reads, and idle-connection timeouts
 * prove failure; otherwise a ping write decides the result.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel with a read completed within this window is considered healthy without further checks.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods prevent transient scheduling delays from being misread as hangs.
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker configured from {@code config}.
     *
     * @param config the transport client configuration; its hang-detection times must exceed the
     * corresponding grace periods.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s",
            config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s",
            config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when this value is positive and the time since the last channel
     * read exceeds it. A non-positive value disables the idle-connection check.
     *
     * @return Idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     *
     * @return Read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     *
     * @return Write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel A channel whose health is to be checked.
     * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel without a request manager indicates a pipeline setup problem.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        // A recent read is sufficient evidence of health; skip the remaining checks.
        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            return promise.setSuccess(Boolean.TRUE);
        }

        // Nonresponding write check: a write attempt outstanding longer than the write-delay
        // limit, once the grace period has also elapsed, marks the channel unhealthy.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Nonresponding read check: the last completed write has gone unanswered past the
        // read-delay limit and the grace period has elapsed since that write.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle-connection check, enabled only when the timeout is positive.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // Last resort: write a ping and decide health from the outcome of the write.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                // SLF4J treats the trailing Throwable specially, so the single "{}" is intended.
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Channel activity timestamps, updated atomically by the request pipeline and snapshotted by
     * the health checker via the copy constructor.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        // NOTE(review): channelWriteAttempted updates lastWriteNanoTime while
        // channelWriteCompleted updates lastWriteAttemptNanoTime — these two setters
        // appear swapped relative to their names, which would invert the
        // writeDelayInNanos (lastWriteAttempt - lastWrite) computed by the health
        // checks. Confirm intent before relying on these timestamps.
        public void channelWriteAttempted() {
            lastWriteUpdater.set(this, System.nanoTime());
        }

        public void channelWriteCompleted() {
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
Are these reads and writes driven by end-user workloads? If so, for a write-only or write-heavy workload the idle condition below (which looks only at the last channel read) might match even though the channel is in active use, right?
/**
 * Checks a channel's health and, on failure, completes the returned future with the reason.
 * <p>
 * The checks run in order: a recent read short-circuits to success; a hung write or a hung read
 * fails the check with a diagnostic message; an expired idle-connection timeout fails it; and
 * finally a ping is written, with the outcome of that write deciding the verdict.
 *
 * @param channel a channel whose health is to be checked.
 * @return a future completing with {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue} on
 * success, or a failure-reason message otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {

    checkNotNull(channel, "expected non-null channel");

    final Promise<String> verdict = channel.eventLoop().newPromise();
    final RntbdRequestManager manager = channel.pipeline().get(RntbdRequestManager.class);

    if (manager == null) {
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return verdict.setSuccess("active with no request manager");
    }

    final long nowInNanos = System.nanoTime();
    final Timestamps snapshot = manager.snapshotTimestamps();
    final long sinceLastReadInNanos = nowInNanos - snapshot.lastChannelReadNanoTime();

    // A read inside the recent-read window settles the question immediately.
    if (sinceLastReadInNanos < recentReadWindowInNanos) {
        return verdict.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }

    // Hung-write detection.
    final long writeLagInNanos =
        snapshot.lastChannelWriteAttemptNanoTime() - snapshot.lastChannelWriteNanoTime();
    final long sinceLastWriteAttemptInNanos = nowInNanos - snapshot.lastChannelWriteAttemptNanoTime();
    final boolean writeIsHung = writeLagInNanos > this.writeDelayLimitInNanos
        && sinceLastWriteAttemptInNanos > writeHangGracePeriodInNanos;

    if (writeIsHung) {

        final Optional<RntbdContext> context = manager.rntbdContext();
        final int pendingRequests = manager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
            channel, snapshot.lastChannelWriteAttemptNanoTime(), snapshot.lastChannelWriteNanoTime(),
            writeLagInNanos, this.writeDelayLimitInNanos, context, pendingRequests);

        final String reason = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel, snapshot.lastChannelWriteAttemptNanoTime(), snapshot.lastChannelWriteNanoTime(),
            writeLagInNanos, this.writeDelayLimitInNanos, context, pendingRequests);

        return verdict.setSuccess(reason);
    }

    // Hung-read detection.
    final long readLagInNanos = snapshot.lastChannelWriteNanoTime() - snapshot.lastChannelReadNanoTime();
    final long sinceLastWriteInNanos = nowInNanos - snapshot.lastChannelWriteNanoTime();
    final boolean readIsHung = readLagInNanos > this.readDelayLimitInNanos
        && sinceLastWriteInNanos > readHangGracePeriodInNanos;

    if (readIsHung) {

        final Optional<RntbdContext> context = manager.rntbdContext();
        final int pendingRequests = manager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel, snapshot.lastChannelWriteNanoTime(), snapshot.lastChannelReadNanoTime(),
            readLagInNanos, this.readDelayLimitInNanos, context, pendingRequests);

        final String reason = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel, snapshot.lastChannelWriteNanoTime(), snapshot.lastChannelReadNanoTime(),
            readLagInNanos, this.readDelayLimitInNanos, context, pendingRequests);

        return verdict.setSuccess(reason);
    }

    // Idle-connection detection; a non-positive timeout disables this check.
    if (this.idleConnectionTimeoutInNanos > 0L && sinceLastReadInNanos > this.idleConnectionTimeoutInNanos) {
        final String reason = MessageFormat.format(
            "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "idleConnectionTimeout: {3}, currentTime: {4}",
            channel, snapshot.lastChannelWriteNanoTime(), snapshot.lastChannelReadNanoTime(),
            idleConnectionTimeoutInNanos, nowInNanos);
        return verdict.setSuccess(reason);
    }

    // Ping the channel; the write outcome decides the verdict.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(writeResult -> {
        if (!writeResult.isSuccess()) {
            logger.warn("{} health check request failed due to:", channel, writeResult.cause());
            verdict.setSuccess(MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel, writeResult.cause().toString()));
        } else {
            verdict.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        }
    });

    return verdict;
}
/**
 * Determines whether a specified channel is healthy and, when it is not, reports the reason.
 * <p>
 * Checks run in order: recent read (healthy), nonresponding write (unhealthy), nonresponding read
 * (unhealthy), idle-connection timeout (unhealthy), then a ping write whose outcome decides the
 * result.
 *
 * @param channel a channel whose health is to be checked.
 * @return a future completing with {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue}
 * when the channel is healthy, or a message describing the failure otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {

    checkNotNull(channel, "expected non-null channel");

    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();

    if (requestManager == null) {
        // An active channel without a request manager indicates a pipeline setup problem.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }

    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();

    // A read completed within the recent-read window is taken as proof of health.
    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }

    // Nonresponding write check: a write attempt outstanding longer than the write-delay limit,
    // once the grace period has also elapsed, marks the channel unhealthy.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        return promise.setSuccess(msg);
    }

    // Nonresponding read check: the last completed write has gone unanswered past the
    // read-delay limit and the grace period has elapsed since that write.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();

        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);

        return promise.setSuccess(msg);
    }

    // Idle-connection check, enabled only when the timeout is positive.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // NOTE(review): this message opens "(" but ends "currentTime: {4}" without the
            // matching ")" — the closing parenthesis appears to be missing.
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime);
            return promise.setSuccess(msg);
        }
    }

    // Last resort: write a ping and decide health from the outcome of the write.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            // SLF4J treats the trailing Throwable specially, so the single "{}" is intended.
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString());
            promise.setSuccess(msg);
        }
    });

    return promise;
}
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
Yes - would hit the idle time And that is intentional - I didn't change the actual logic - just added an overload providing the reason for failure
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
class RntbdClientChannelHealthChecker implements ChannelHealthChecker { private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class); private static final long recentReadWindowInNanos = 1_000_000_000L; private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L; private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L; @JsonProperty private final long idleConnectionTimeoutInNanos; @JsonProperty private final long readDelayLimitInNanos; @JsonProperty private final long writeDelayLimitInNanos; public RntbdClientChannelHealthChecker(final Config config) { checkNotNull(config, "expected non-null config"); checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos, "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos()); checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos, "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos()); this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos(); this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos(); this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos(); } /** * Returns the idle connection timeout interval in nanoseconds. * <p> * A channel is considered idle if {@link * the last channel read is greater than {@link * * @return Idle connection timeout interval in nanoseconds. */ public long idleConnectionTimeoutInNanos() { return this.idleConnectionTimeoutInNanos; } /** * Returns the read delay limit in nanoseconds. * <p> * A channel will be declared unhealthy if the gap between the last channel write and the last channel read grows * beyond this value. * <p> * Constraint: {@link * * @return Read delay limit in nanoseconds. */ public long readDelayLimitInNanos() { return this.readDelayLimitInNanos; } /** * Returns the write delay limit in nanoseconds. 
* <p> * A channel will be declared unhealthy if the gap between the last channel write attempt and the last channel write * grows beyond this value. * <p> * Constraint: {@link * * @return Write delay limit in nanoseconds. */ public long writeDelayLimitInNanos() { return this.writeDelayLimitInNanos; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. */ public Future<Boolean> isHealthy(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<Boolean> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess(Boolean.FALSE); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(Boolean.TRUE); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), 
timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); return promise.setSuccess(Boolean.FALSE); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { return promise.setSuccess(Boolean.FALSE); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(Boolean.TRUE); } else { logger.warn("{} health check request failed due to:", channel, completed.cause()); promise.setSuccess(Boolean.FALSE); } }); return promise; } /** * Determines whether a specified channel is healthy. * * @param channel A channel whose health is to be checked. * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise. 
*/ @Override public String toString() { return RntbdObjectMapper.toString(this); } static final class Timestamps { private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater = newUpdater(Timestamps.class, "lastPingNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater = newUpdater(Timestamps.class, "lastReadNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater = newUpdater(Timestamps.class, "lastWriteNanoTime"); private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater = newUpdater(Timestamps.class, "lastWriteAttemptNanoTime"); private volatile long lastPingNanoTime; private volatile long lastReadNanoTime; private volatile long lastWriteNanoTime; private volatile long lastWriteAttemptNanoTime; public Timestamps() { } @SuppressWarnings("CopyConstructorMissesField") public Timestamps(Timestamps other) { checkNotNull(other, "other: null"); this.lastPingNanoTime = lastPingUpdater.get(other); this.lastReadNanoTime = lastReadUpdater.get(other); this.lastWriteNanoTime = lastWriteUpdater.get(other); this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other); } public void channelPingCompleted() { lastPingUpdater.set(this, System.nanoTime()); } public void channelReadCompleted() { lastReadUpdater.set(this, System.nanoTime()); } public void channelWriteAttempted() { lastWriteUpdater.set(this, System.nanoTime()); } public void channelWriteCompleted() { lastWriteAttemptUpdater.set(this, System.nanoTime()); } @JsonProperty public long lastChannelPingNanoTime() { return lastPingUpdater.get(this); } @JsonProperty public long lastChannelReadNanoTime() { return lastReadUpdater.get(this); } @JsonProperty public long lastChannelWriteNanoTime() { return lastWriteUpdater.get(this); } @JsonProperty public long lastChannelWriteAttemptNanoTime() { return lastWriteAttemptUpdater.get(this); } @Override public String toString() { return RntbdObjectMapper.toString(this); } } }
/**
 * A {@link ChannelHealthChecker} for RNTBD channels.
 * <p>
 * A channel is declared unhealthy when a write or a read appears to be hanging beyond the
 * configured limits, or when the connection has been idle longer than the idle-connection
 * timeout. A channel that passes these checks is probed with a no-op health-check request
 * before being declared healthy.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel that completed a read within this window is considered healthy without probing.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods before a hang is flagged. The read grace period is 45s + 10s — presumably the
    // request timeout plus slack (TODO confirm against transport configuration).
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker from the transport {@code Config}.
     *
     * @param config transport configuration; its hang-detection times must exceed the
     * corresponding grace periods.
     * @throws NullPointerException if {@code config} is {@code null}.
     * @throws IllegalArgumentException if a hang-detection time does not exceed its grace period.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when the time elapsed since the last channel read exceeds this
     * value. A value of zero or less disables the idle check.
     *
     * @return Idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     * <p>
     * Constraint: this value must exceed {@code readHangGracePeriodInNanos}.
     *
     * @return Read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     * <p>
     * Constraint: this value must exceed {@code writeHangGracePeriodInNanos}.
     *
     * @return Write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel A channel whose health is to be checked.
     * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel should always carry a request manager; report the inconsistency.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A recent read implies the channel is alive; skip the remaining checks.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Nonresponding write: a write was attempted but has not completed within the limit.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Nonresponding read: a write completed but no response has been read within the limit.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle check is disabled when the timeout is non-positive.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // Probe the channel with a no-op health-check request before declaring it healthy.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    /**
     * Returns a JSON representation of this health checker's configuration.
     *
     * @return a JSON string describing this instance.
     */
    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Records the most recent channel activity times used by the health checks.
     * <p>
     * All fields are read and written through {@link AtomicLongFieldUpdater}s, so instances may be
     * updated and snapshotted concurrently.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        /** Copy constructor taking an atomic snapshot of each field of {@code other}. */
        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        public void channelWriteAttempted() {
            // BUG FIX: previously updated lastWriteNanoTime — the attempt/completion updaters were
            // swapped, which inverted writeDelayInNanos in the health checks.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        public void channelWriteCompleted() {
            // BUG FIX: previously updated lastWriteAttemptNanoTime (see channelWriteAttempted).
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
No — because the maximum time between a write and the corresponding read (i.e., between the request being sent and the first byte of the response being received) is 10 seconds, which is small compared to the idle-time period. Also, this logic is the same as what was already in place — the change only adds the failure reasons — so there is no reason to change the logic (at least until we have more diagnostics).
/**
 * Determines whether a specified channel is healthy and, when it is not, reports why.
 * <p>
 * Mirrors {@code isHealthy(Channel)}, but resolves the returned future with
 * {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue} when the channel is healthy, or
 * with a human-readable failure reason otherwise. The future is always completed successfully;
 * the failure is conveyed in the result string, not as a future failure.
 *
 * @param channel A channel whose health is to be checked.
 * @return A future resolved with the success value when the channel is healthy, or with a
 * message describing the failure otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {

    checkNotNull(channel, "expected non-null channel");

    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();

    if (requestManager == null) {
        // An active channel should always carry a request manager; report the inconsistency.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }

    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();

    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        // A recent read implies the channel is alive; skip the remaining checks.
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }

    // Nonresponding write: a write was attempted but has not completed within the limit.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        return promise.setSuccess(msg);
    }

    // Nonresponding read: a write completed but no response has been read within the limit.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        return promise.setSuccess(msg);
    }

    // Idle check is disabled when the timeout is non-positive.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // FIX: appended the closing ')' that the other failure messages include.
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4})",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime);
            return promise.setSuccess(msg);
        }
    }

    // Probe the channel with a no-op health-check request before declaring it healthy.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString());
            promise.setSuccess(msg);
        }
    });

    return promise;
}
if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
/**
 * Determines whether a specified channel is healthy and, when it is not, reports why.
 * <p>
 * Mirrors {@code isHealthy(Channel)}, but resolves the returned future with
 * {@code RntbdConstants.RntbdHealthCheckResults.SuccessValue} when the channel is healthy, or
 * with a human-readable failure reason otherwise. The future is always completed successfully;
 * the failure is conveyed in the result string, not as a future failure.
 *
 * @param channel A channel whose health is to be checked.
 * @return A future resolved with the success value when the channel is healthy, or with a
 * message describing the failure otherwise.
 */
public Future<String> isHealthyWithFailureReason(final Channel channel) {

    checkNotNull(channel, "expected non-null channel");

    final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
    final Promise<String> promise = channel.eventLoop().newPromise();

    if (requestManager == null) {
        // An active channel should always carry a request manager; report the inconsistency.
        reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
        return promise.setSuccess("active with no request manager");
    }

    final Timestamps timestamps = requestManager.snapshotTimestamps();
    final long currentTime = System.nanoTime();

    if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
        // A recent read implies the channel is alive; skip the remaining checks.
        return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
    }

    // Nonresponding write: a write was attempted but has not completed within the limit.
    final long writeDelayInNanos =
        timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
    final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

    if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                + "rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, "
                + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, "
                + "rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteAttemptNanoTime(),
            timestamps.lastChannelWriteNanoTime(),
            writeDelayInNanos,
            this.writeDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        return promise.setSuccess(msg);
    }

    // Nonresponding read: a write completed but no response has been read within the limit.
    final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
    final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

    if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
        final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
        final int pendingRequestCount = requestManager.pendingRequestCount();
        logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        String msg = MessageFormat.format(
            "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})",
            channel,
            timestamps.lastChannelWriteNanoTime(),
            timestamps.lastChannelReadNanoTime(),
            readDelay,
            this.readDelayLimitInNanos,
            rntbdContext,
            pendingRequestCount);
        return promise.setSuccess(msg);
    }

    // Idle check is disabled when the timeout is non-positive.
    if (this.idleConnectionTimeoutInNanos > 0L) {
        if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
            // FIX: appended the closing ')' that the other failure messages include.
            String msg = MessageFormat.format(
                "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, "
                    + "idleConnectionTimeout: {3}, currentTime: {4})",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                idleConnectionTimeoutInNanos,
                currentTime);
            return promise.setSuccess(msg);
        }
    }

    // Probe the channel with a no-op health-check request before declaring it healthy.
    channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
        if (completed.isSuccess()) {
            promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue);
        } else {
            logger.warn("{} health check request failed due to:", channel, completed.cause());
            String msg = MessageFormat.format(
                "{0} health check request failed due to: {1}",
                channel,
                completed.cause().toString());
            promise.setSuccess(msg);
        }
    });

    return promise;
}
/**
 * A {@link ChannelHealthChecker} for RNTBD channels.
 * <p>
 * A channel is declared unhealthy when a write or a read appears to be hanging beyond the
 * configured limits, or when the connection has been idle longer than the idle-connection
 * timeout. A channel that passes these checks is probed with a no-op health-check request
 * before being declared healthy.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel that completed a read within this window is considered healthy without probing.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods before a hang is flagged. The read grace period is 45s + 10s — presumably the
    // request timeout plus slack (TODO confirm against transport configuration).
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker from the transport {@code Config}.
     *
     * @param config transport configuration; its hang-detection times must exceed the
     * corresponding grace periods.
     * @throws NullPointerException if {@code config} is {@code null}.
     * @throws IllegalArgumentException if a hang-detection time does not exceed its grace period.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when the time elapsed since the last channel read exceeds this
     * value. A value of zero or less disables the idle check.
     *
     * @return Idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     * <p>
     * Constraint: this value must exceed {@code readHangGracePeriodInNanos}.
     *
     * @return Read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     * <p>
     * Constraint: this value must exceed {@code writeHangGracePeriodInNanos}.
     *
     * @return Write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel A channel whose health is to be checked.
     * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel should always carry a request manager; report the inconsistency.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A recent read implies the channel is alive; skip the remaining checks.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Nonresponding write: a write was attempted but has not completed within the limit.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Nonresponding read: a write completed but no response has been read within the limit.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle check is disabled when the timeout is non-positive.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // Probe the channel with a no-op health-check request before declaring it healthy.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    /**
     * Returns a JSON representation of this health checker's configuration.
     *
     * @return a JSON string describing this instance.
     */
    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Records the most recent channel activity times used by the health checks.
     * <p>
     * All fields are read and written through {@link AtomicLongFieldUpdater}s, so instances may be
     * updated and snapshotted concurrently.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        /** Copy constructor taking an atomic snapshot of each field of {@code other}. */
        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        public void channelWriteAttempted() {
            // BUG FIX: previously updated lastWriteNanoTime — the attempt/completion updaters were
            // swapped, which inverted writeDelayInNanos in the health checks.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        public void channelWriteCompleted() {
            // BUG FIX: previously updated lastWriteAttemptNanoTime (see channelWriteAttempted).
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
/**
 * A {@link ChannelHealthChecker} for RNTBD channels.
 * <p>
 * A channel is declared unhealthy when a write or a read appears to be hanging beyond the
 * configured limits, or when the connection has been idle longer than the idle-connection
 * timeout. A channel that passes these checks is probed with a no-op health-check request
 * before being declared healthy.
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel that completed a read within this window is considered healthy without probing.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods before a hang is flagged. The read grace period is 45s + 10s — presumably the
    // request timeout plus slack (TODO confirm against transport configuration).
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker from the transport {@code Config}.
     *
     * @param config transport configuration; its hang-detection times must exceed the
     * corresponding grace periods.
     * @throws NullPointerException if {@code config} is {@code null}.
     * @throws IllegalArgumentException if a hang-detection time does not exceed its grace period.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s", config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s", config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when the time elapsed since the last channel read exceeds this
     * value. A value of zero or less disables the idle check.
     *
     * @return Idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     * <p>
     * Constraint: this value must exceed {@code readHangGracePeriodInNanos}.
     *
     * @return Read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel will be declared unhealthy if the gap between the last channel write attempt and
     * the last channel write grows beyond this value.
     * <p>
     * Constraint: this value must exceed {@code writeHangGracePeriodInNanos}.
     *
     * @return Write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel A channel whose health is to be checked.
     * @return A future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel should always carry a request manager; report the inconsistency.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A recent read implies the channel is alive; skip the remaining checks.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Nonresponding write: a write was attempted but has not completed within the limit.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();
        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Nonresponding read: a write completed but no response has been read within the limit.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {
            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();
            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);
            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle check is disabled when the timeout is non-positive.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // Probe the channel with a no-op health-check request before declaring it healthy.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    /**
     * Returns a JSON representation of this health checker's configuration.
     *
     * @return a JSON string describing this instance.
     */
    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Records the most recent channel activity times used by the health checks.
     * <p>
     * All fields are read and written through {@link AtomicLongFieldUpdater}s, so instances may be
     * updated and snapshotted concurrently.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        /** Copy constructor taking an atomic snapshot of each field of {@code other}. */
        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        public void channelWriteAttempted() {
            // BUG FIX: previously updated lastWriteNanoTime — the attempt/completion updaters were
            // swapped, which inverted writeDelayInNanos in the health checks.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        public void channelWriteCompleted() {
            // BUG FIX: previously updated lastWriteAttemptNanoTime (see channelWriteAttempted).
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
No — "write" here means a request was sent (it could be a point-read request), so the measured span is the time between the last byte of the request being sent and the first byte of the response being received. That works because RNTBD, like HTTP (at least 1.x), is strictly a request/response protocol.
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
public Future<String> isHealthyWithFailureReason(final Channel channel) { checkNotNull(channel, "expected non-null channel"); final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class); final Promise<String> promise = channel.eventLoop().newPromise(); if (requestManager == null) { reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager"); return promise.setSuccess("active with no request manager"); } final Timestamps timestamps = requestManager.snapshotTimestamps(); final long currentTime = System.nanoTime(); if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) { return promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } final long writeDelayInNanos = timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime(); final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime(); if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, " + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, " + "rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding write: (lastChannelWriteAttemptNanoTime: {1}, " + "lastChannelWriteNanoTime: {2}, writeDelayInNanos: {3}, writeDelayLimitInNanos: {4}, " + "rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteAttemptNanoTime(), timestamps.lastChannelWriteNanoTime(), 
writeDelayInNanos, this.writeDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime(); final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime(); if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) { final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext(); final int pendingRequestCount = requestManager.pendingRequestCount(); logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, " + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount); String msg = MessageFormat.format( "{0} health check failed due to nonresponding read: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "readDelay: {3}, readDelayLimit: {4}, rntbdContext: {5}, pendingRequestCount: {6})", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), readDelay, this.readDelayLimitInNanos, rntbdContext, pendingRequestCount ); return promise.setSuccess(msg); } if (this.idleConnectionTimeoutInNanos > 0L) { if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) { String msg = MessageFormat.format( "{0} health check failed due to idle connection timeout: (lastChannelWrite: {1}, lastChannelRead: {2}, " + "idleConnectionTimeout: {3}, currentTime: {4}", channel, timestamps.lastChannelWriteNanoTime(), timestamps.lastChannelReadNanoTime(), idleConnectionTimeoutInNanos, currentTime ); return promise.setSuccess(msg); } } channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> { if (completed.isSuccess()) { promise.setSuccess(RntbdConstants.RntbdHealthCheckResults.SuccessValue); } 
else { logger.warn("{} health check request failed due to:", channel, completed.cause()); String msg = MessageFormat.format( "{0} health check request failed due to: {1}", channel, completed.cause().toString() ); promise.setSuccess(msg); } }); return promise; }
/**
 * Monitors the health of RNTBD channels based on read/write timestamps, idle time, and an
 * on-demand ping (health-check request).
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel that completed a read within this window is presumed healthy without further checks.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods suppress false positives immediately after a write or write attempt.
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker configured from the given endpoint {@code config}.
     *
     * @param config endpoint configuration; its receive/send hang-detection times must exceed the
     * read/write hang grace periods respectively.
     * @throws NullPointerException if {@code config} is null.
     * @throws IllegalArgumentException if a hang-detection time does not exceed its grace period.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s",
            config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s",
            config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when the time since its last channel read exceeds this value.
     * A value of zero or less disables the idle check.
     *
     * @return idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel is declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     *
     * @return read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel is declared unhealthy if the gap between the last channel write attempt and the
     * last completed channel write grows beyond this value.
     *
     * @return write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel a channel whose health is to be checked.
     * @return a future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel with no request manager indicates a pipeline construction issue.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A read completed very recently; the channel is presumed healthy.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Write-hang check: an attempt long outstanding relative to the last completed write.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();

        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Read-hang check: no read observed for too long after the last completed write.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle check: only enforced when a positive idle-connection timeout was configured.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // All passive checks passed; actively ping the channel and report the flush outcome.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    /** Renders this health checker's configuration as JSON. */
    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Thread-safe snapshot of channel event times, maintained via atomic field updaters.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        public void channelWriteAttempted() {
            // FIX: previously updated lastWriteNanoTime here; a write *attempt* must update the
            // attempt timestamp, otherwise writeDelayInNanos inverts and hang detection never fires.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        public void channelWriteCompleted() {
            // FIX: previously updated lastWriteAttemptNanoTime here; a *completed* write must
            // update the write timestamp (see channelWriteAttempted).
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
/**
 * Monitors the health of RNTBD channels based on read/write timestamps, idle time, and an
 * on-demand ping (health-check request).
 */
class RntbdClientChannelHealthChecker implements ChannelHealthChecker {

    private static final Logger logger = LoggerFactory.getLogger(RntbdClientChannelHealthChecker.class);

    // A channel that completed a read within this window is presumed healthy without further checks.
    private static final long recentReadWindowInNanos = 1_000_000_000L;

    // Grace periods suppress false positives immediately after a write or write attempt.
    private static final long readHangGracePeriodInNanos = (45L + 10L) * 1_000_000_000L;
    private static final long writeHangGracePeriodInNanos = 2L * 1_000_000_000L;

    @JsonProperty
    private final long idleConnectionTimeoutInNanos;

    @JsonProperty
    private final long readDelayLimitInNanos;

    @JsonProperty
    private final long writeDelayLimitInNanos;

    /**
     * Creates a health checker configured from the given endpoint {@code config}.
     *
     * @param config endpoint configuration; its receive/send hang-detection times must exceed the
     * read/write hang grace periods respectively.
     * @throws NullPointerException if {@code config} is null.
     * @throws IllegalArgumentException if a hang-detection time does not exceed its grace period.
     */
    public RntbdClientChannelHealthChecker(final Config config) {

        checkNotNull(config, "expected non-null config");

        checkArgument(config.receiveHangDetectionTimeInNanos() > readHangGracePeriodInNanos,
            "config.receiveHangDetectionTimeInNanos: %s",
            config.receiveHangDetectionTimeInNanos());

        checkArgument(config.sendHangDetectionTimeInNanos() > writeHangGracePeriodInNanos,
            "config.sendHangDetectionTimeInNanos: %s",
            config.sendHangDetectionTimeInNanos());

        this.idleConnectionTimeoutInNanos = config.idleConnectionTimeoutInNanos();
        this.readDelayLimitInNanos = config.receiveHangDetectionTimeInNanos();
        this.writeDelayLimitInNanos = config.sendHangDetectionTimeInNanos();
    }

    /**
     * Returns the idle connection timeout interval in nanoseconds.
     * <p>
     * A channel is considered idle when the time since its last channel read exceeds this value.
     * A value of zero or less disables the idle check.
     *
     * @return idle connection timeout interval in nanoseconds.
     */
    public long idleConnectionTimeoutInNanos() {
        return this.idleConnectionTimeoutInNanos;
    }

    /**
     * Returns the read delay limit in nanoseconds.
     * <p>
     * A channel is declared unhealthy if the gap between the last channel write and the last
     * channel read grows beyond this value.
     *
     * @return read delay limit in nanoseconds.
     */
    public long readDelayLimitInNanos() {
        return this.readDelayLimitInNanos;
    }

    /**
     * Returns the write delay limit in nanoseconds.
     * <p>
     * A channel is declared unhealthy if the gap between the last channel write attempt and the
     * last completed channel write grows beyond this value.
     *
     * @return write delay limit in nanoseconds.
     */
    public long writeDelayLimitInNanos() {
        return this.writeDelayLimitInNanos;
    }

    /**
     * Determines whether a specified channel is healthy.
     *
     * @param channel a channel whose health is to be checked.
     * @return a future with a result of {@code true} if the channel is healthy, or {@code false} otherwise.
     */
    public Future<Boolean> isHealthy(final Channel channel) {

        checkNotNull(channel, "expected non-null channel");

        final RntbdRequestManager requestManager = channel.pipeline().get(RntbdRequestManager.class);
        final Promise<Boolean> promise = channel.eventLoop().newPromise();

        if (requestManager == null) {
            // An active channel with no request manager indicates a pipeline construction issue.
            reportIssueUnless(logger, !channel.isActive(), channel, "active with no request manager");
            return promise.setSuccess(Boolean.FALSE);
        }

        final Timestamps timestamps = requestManager.snapshotTimestamps();
        final long currentTime = System.nanoTime();

        if (currentTime - timestamps.lastChannelReadNanoTime() < recentReadWindowInNanos) {
            // A read completed very recently; the channel is presumed healthy.
            return promise.setSuccess(Boolean.TRUE);
        }

        // Write-hang check: an attempt long outstanding relative to the last completed write.
        final long writeDelayInNanos =
            timestamps.lastChannelWriteAttemptNanoTime() - timestamps.lastChannelWriteNanoTime();

        final long writeHangDurationInNanos = currentTime - timestamps.lastChannelWriteAttemptNanoTime();

        if (writeDelayInNanos > this.writeDelayLimitInNanos && writeHangDurationInNanos > writeHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding write: {lastChannelWriteAttemptNanoTime: {}, "
                    + "lastChannelWriteNanoTime: {}, writeDelayInNanos: {}, writeDelayLimitInNanos: {}, "
                    + "rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteAttemptNanoTime(),
                timestamps.lastChannelWriteNanoTime(),
                writeDelayInNanos,
                this.writeDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Read-hang check: no read observed for too long after the last completed write.
        final long readDelay = timestamps.lastChannelWriteNanoTime() - timestamps.lastChannelReadNanoTime();
        final long readHangDuration = currentTime - timestamps.lastChannelWriteNanoTime();

        if (readDelay > this.readDelayLimitInNanos && readHangDuration > readHangGracePeriodInNanos) {

            final Optional<RntbdContext> rntbdContext = requestManager.rntbdContext();
            final int pendingRequestCount = requestManager.pendingRequestCount();

            logger.warn("{} health check failed due to nonresponding read: {lastChannelWrite: {}, lastChannelRead: {}, "
                    + "readDelay: {}, readDelayLimit: {}, rntbdContext: {}, pendingRequestCount: {}}",
                channel,
                timestamps.lastChannelWriteNanoTime(),
                timestamps.lastChannelReadNanoTime(),
                readDelay,
                this.readDelayLimitInNanos,
                rntbdContext,
                pendingRequestCount);

            return promise.setSuccess(Boolean.FALSE);
        }

        // Idle check: only enforced when a positive idle-connection timeout was configured.
        if (this.idleConnectionTimeoutInNanos > 0L) {
            if (currentTime - timestamps.lastChannelReadNanoTime() > this.idleConnectionTimeoutInNanos) {
                return promise.setSuccess(Boolean.FALSE);
            }
        }

        // All passive checks passed; actively ping the channel and report the flush outcome.
        channel.writeAndFlush(RntbdHealthCheckRequest.MESSAGE).addListener(completed -> {
            if (completed.isSuccess()) {
                promise.setSuccess(Boolean.TRUE);
            } else {
                logger.warn("{} health check request failed due to:", channel, completed.cause());
                promise.setSuccess(Boolean.FALSE);
            }
        });

        return promise;
    }

    /** Renders this health checker's configuration as JSON. */
    @Override
    public String toString() {
        return RntbdObjectMapper.toString(this);
    }

    /**
     * Thread-safe snapshot of channel event times, maintained via atomic field updaters.
     */
    static final class Timestamps {

        private static final AtomicLongFieldUpdater<Timestamps> lastPingUpdater =
            newUpdater(Timestamps.class, "lastPingNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastReadUpdater =
            newUpdater(Timestamps.class, "lastReadNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteUpdater =
            newUpdater(Timestamps.class, "lastWriteNanoTime");
        private static final AtomicLongFieldUpdater<Timestamps> lastWriteAttemptUpdater =
            newUpdater(Timestamps.class, "lastWriteAttemptNanoTime");

        private volatile long lastPingNanoTime;
        private volatile long lastReadNanoTime;
        private volatile long lastWriteNanoTime;
        private volatile long lastWriteAttemptNanoTime;

        public Timestamps() {
        }

        @SuppressWarnings("CopyConstructorMissesField")
        public Timestamps(Timestamps other) {
            checkNotNull(other, "other: null");
            this.lastPingNanoTime = lastPingUpdater.get(other);
            this.lastReadNanoTime = lastReadUpdater.get(other);
            this.lastWriteNanoTime = lastWriteUpdater.get(other);
            this.lastWriteAttemptNanoTime = lastWriteAttemptUpdater.get(other);
        }

        public void channelPingCompleted() {
            lastPingUpdater.set(this, System.nanoTime());
        }

        public void channelReadCompleted() {
            lastReadUpdater.set(this, System.nanoTime());
        }

        public void channelWriteAttempted() {
            // FIX: previously updated lastWriteNanoTime here; a write *attempt* must update the
            // attempt timestamp, otherwise writeDelayInNanos inverts and hang detection never fires.
            lastWriteAttemptUpdater.set(this, System.nanoTime());
        }

        public void channelWriteCompleted() {
            // FIX: previously updated lastWriteAttemptNanoTime here; a *completed* write must
            // update the write timestamp (see channelWriteAttempted).
            lastWriteUpdater.set(this, System.nanoTime());
        }

        @JsonProperty
        public long lastChannelPingNanoTime() {
            return lastPingUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelReadNanoTime() {
            return lastReadUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteNanoTime() {
            return lastWriteUpdater.get(this);
        }

        @JsonProperty
        public long lastChannelWriteAttemptNanoTime() {
            return lastWriteAttemptUpdater.get(this);
        }

        @Override
        public String toString() {
            return RntbdObjectMapper.toString(this);
        }
    }
}
Does this need to include the try count? It is already implied by the fact that the number of attempts exceeded the maximum limit.
/**
 * Determines whether another redirect attempt may be made, i.e. whether the attempt count is
 * still below {@code maxAttempts}.
 *
 * @param tryCount the try count for the HTTP request associated to the HTTP response.
 * @return {@code true} when {@code tryCount} has not reached {@code maxAttempts}, {@code false} otherwise.
 */
private boolean isValidRedirectCount(int tryCount) {
    // Guard clause: still under the limit, so the redirect may proceed.
    if (tryCount < getMaxAttempts()) {
        return true;
    }
    // Limit reached or exceeded; record the exhaustion before rejecting the redirect.
    LOGGER.atError()
        .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
        .addKeyValue("maxAttempts", getMaxAttempts())
        .log("Redirect attempts have been exhausted.");
    return false;
}
.addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount)
/**
 * Checks whether another redirect attempt is permitted, i.e. the attempt count is still below
 * {@code maxAttempts}.
 *
 * @param tryCount the try count for the HTTP request associated to the HTTP response.
 * @return {@code true} if {@code tryCount} is less than {@code maxAttempts}, {@code false} otherwise.
 */
private boolean isValidRedirectCount(int tryCount) {
    if (tryCount >= getMaxAttempts()) {
        // The try count is implied by exceeding the limit, so only maxAttempts is logged here.
        LOGGER.atError()
            .addKeyValue("maxAttempts", getMaxAttempts())
            .log("Redirect attempts have been exhausted.");
        return false;
    }
    return true;
}
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. 
*/ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name to look up the value for the redirect url in response headers. 
* * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url was null, request redirect was terminated."); return null; } else { return headerValue; } } }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private static final String REDIRECT_URLS_KEY = "redirectUrls"; private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. 
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(REDIRECT_URLS_KEY, () -> attemptedRedirectUrls.toString()) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name 
to look up the value for the redirect url in response headers. * * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url header was null, request redirect was terminated."); return null; } else { return headerValue; } } }
```suggestion .log("Redirect url header was null, request redirect was terminated."); ``` Thoughts?
String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url was null, request redirect was terminated."); return null; } else { return headerValue; } }
.log("Redirect url was null, request redirect was terminated.");
String tryGetRedirectHeader(HttpHeaders headers, String headerName) { String headerValue = headers.getValue(headerName); if (CoreUtils.isNullOrEmpty(headerValue)) { LOGGER.atError() .addKeyValue("headerName", headerName) .log("Redirect url header was null, request redirect was terminated."); return null; } else { return headerValue; } }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. 
*/ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name to look up the value for the redirect url in response headers. 
* * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ private boolean isValidRedirectCount(int tryCount) { if (tryCount >= getMaxAttempts()) { LOGGER.atError() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue("maxAttempts", getMaxAttempts()) .log("Redirect attempts have been exhausted."); return false; } return true; } /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ }
class DefaultRedirectStrategy implements RedirectStrategy { private static final ClientLogger LOGGER = new ClientLogger(DefaultRedirectStrategy.class); private static final int DEFAULT_MAX_REDIRECT_ATTEMPTS = 3; private static final String DEFAULT_REDIRECT_LOCATION_HEADER_NAME = "Location"; private static final int PERMANENT_REDIRECT_STATUS_CODE = 308; private static final int TEMPORARY_REDIRECT_STATUS_CODE = 307; private static final Set<HttpMethod> DEFAULT_REDIRECT_ALLOWED_METHODS = new HashSet<>(Arrays.asList(HttpMethod.GET, HttpMethod.HEAD)); private static final String REDIRECT_URLS_KEY = "redirectUrls"; private final int maxAttempts; private final String locationHeader; private final Set<HttpMethod> allowedRedirectHttpMethods; /** * Creates an instance of {@link DefaultRedirectStrategy} with a maximum number of redirect attempts 3, * header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod */ public DefaultRedirectStrategy() { this(DEFAULT_MAX_REDIRECT_ATTEMPTS, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy} with the provided number of redirect attempts and * default header name "Location" to locate the redirect url in the response headers and {@link HttpMethod * and {@link HttpMethod * * @param maxAttempts The max number of redirect attempts that can be made. * @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts) { this(maxAttempts, DEFAULT_REDIRECT_LOCATION_HEADER_NAME, DEFAULT_REDIRECT_ALLOWED_METHODS); } /** * Creates an instance of {@link DefaultRedirectStrategy}. * * @param maxAttempts The max number of redirect attempts that can be made. * @param locationHeader The header name containing the redirect URL. * @param allowedMethods The set of {@link HttpMethod} that are allowed to be redirected. 
* @throws IllegalArgumentException if {@code maxAttempts} is less than 0. */ public DefaultRedirectStrategy(int maxAttempts, String locationHeader, Set<HttpMethod> allowedMethods) { if (maxAttempts < 0) { throw LOGGER.logExceptionAsError(new IllegalArgumentException("Max attempts cannot be less than 0.")); } this.maxAttempts = maxAttempts; if (CoreUtils.isNullOrEmpty(locationHeader)) { LOGGER.error("'locationHeader' provided as null will be defaulted to {}", DEFAULT_REDIRECT_LOCATION_HEADER_NAME); this.locationHeader = DEFAULT_REDIRECT_LOCATION_HEADER_NAME; } else { this.locationHeader = locationHeader; } if (CoreUtils.isNullOrEmpty(allowedMethods)) { LOGGER.error("'allowedMethods' provided as null will be defaulted to {}", DEFAULT_REDIRECT_ALLOWED_METHODS); this.allowedRedirectHttpMethods = DEFAULT_REDIRECT_ALLOWED_METHODS; } else { this.allowedRedirectHttpMethods = allowedMethods; } } @Override public boolean shouldAttemptRedirect(HttpPipelineCallContext context, HttpResponse httpResponse, int tryCount, Set<String> attemptedRedirectUrls) { if (isValidRedirectStatusCode(httpResponse.getStatusCode()) && isValidRedirectCount(tryCount) && isAllowedRedirectMethod(httpResponse.getRequest().getHttpMethod())) { String redirectUrl = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); if (redirectUrl != null && !alreadyAttemptedRedirectUrl(redirectUrl, attemptedRedirectUrls)) { LOGGER.atVerbose() .addKeyValue(LoggingKeys.TRY_COUNT_KEY, tryCount) .addKeyValue(REDIRECT_URLS_KEY, () -> attemptedRedirectUrls.toString()) .log("Redirecting."); attemptedRedirectUrls.add(redirectUrl); return true; } else { return false; } } else { return false; } } @Override public HttpRequest createRedirectRequest(HttpResponse httpResponse) { String responseLocation = tryGetRedirectHeader(httpResponse.getHeaders(), getLocationHeader()); return httpResponse.getRequest().setUrl(responseLocation); } @Override public int getMaxAttempts() { return maxAttempts; } /* * The header name 
to look up the value for the redirect url in response headers. * * @return the value of the header, or null if the header doesn't exist in the response. */ String getLocationHeader() { return locationHeader; } /* * The {@link HttpMethod http methods} that are allowed to be redirected. * * @return the set of allowed redirect http methods. */ Set<HttpMethod> getAllowedRedirectHttpMethods() { return allowedRedirectHttpMethods; } /** * Check if the redirect url provided in the response headers is already attempted. * * @param redirectUrl the redirect url provided in the response header. * @param attemptedRedirectUrls the set containing a list of attempted redirect locations. * @return {@code true} if the redirectUrl provided in the response header is already being attempted for redirect * , {@code false} otherwise. */ private boolean alreadyAttemptedRedirectUrl(String redirectUrl, Set<String> attemptedRedirectUrls) { if (attemptedRedirectUrls.contains(redirectUrl)) { LOGGER.atError() .addKeyValue(LoggingKeys.REDIRECT_URL_KEY, redirectUrl) .log("Request was redirected more than once to the same URL."); return true; } return false; } /** * Check if the attempt count of the redirect is less than the {@code maxAttempts} * * @param tryCount the try count for the HTTP request associated to the HTTP response. * @return {@code true} if the {@code tryCount} is greater than the {@code maxAttempts}, {@code false} otherwise. */ private boolean isValidRedirectCount(int tryCount) { if (tryCount >= getMaxAttempts()) { LOGGER.atError() .addKeyValue("maxAttempts", getMaxAttempts()) .log("Redirect attempts have been exhausted."); return false; } return true; } /** * Check if the request http method is a valid redirect method. * * @param httpMethod the http method of the request. * @return {@code true} if the request {@code httpMethod} is a valid http redirect method, {@code false} otherwise. 
*/ private boolean isAllowedRedirectMethod(HttpMethod httpMethod) { if (getAllowedRedirectHttpMethods().contains(httpMethod)) { return true; } else { LOGGER.atError() .addKeyValue(LoggingKeys.HTTP_METHOD_KEY, httpMethod) .log("Request was redirected from an invalid redirect allowed method."); return false; } } /** * Checks if the incoming request status code is a valid redirect status code. * * @param statusCode the status code of the incoming request. * @return {@code true} if the request {@code statusCode} is a valid http redirect method, {@code false} otherwise. */ private boolean isValidRedirectStatusCode(int statusCode) { return statusCode == HttpURLConnection.HTTP_MOVED_TEMP || statusCode == HttpURLConnection.HTTP_MOVED_PERM || statusCode == PERMANENT_REDIRECT_STATUS_CODE || statusCode == TEMPORARY_REDIRECT_STATUS_CODE; } /** * Gets the redirect url from the response headers. * * @param headers the http response headers. * @param headerName the header name to look up value for. * @return the header value for the provided header name, {@code null} otherwise. */ }
should we use != null here to match the condition below?
private ProxyOptions getProxyOptions() { String config = Configs.getClientTelemetryProxyOptionsConfig(); if (StringUtils.isNotEmpty(config)) { try { JsonProxyOptionsConfig proxyOptionsConfig = Utils.getSimpleObjectMapper().readValue(config, JsonProxyOptionsConfig.class); ProxyOptions.Type type = ProxyOptions.Type.valueOf(proxyOptionsConfig.type); if (type != ProxyOptions.Type.HTTP) { throw new IllegalArgumentException("Only http proxy type is supported."); } if (logger.isDebugEnabled()) { logger.debug( "Enable proxy with type {}, host {}, port {}, userName {}, password length {}", type, proxyOptionsConfig.host, proxyOptionsConfig.port, proxyOptionsConfig.username, proxyOptionsConfig.password != null ? proxyOptionsConfig.password.length() : -1 ); } ProxyOptions proxyOptions = new ProxyOptions( type, new InetSocketAddress(proxyOptionsConfig.host, proxyOptionsConfig.port)); if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) || !Strings.isNullOrEmpty(proxyOptionsConfig.password)) { proxyOptions.setCredentials( proxyOptionsConfig.username != null ? proxyOptionsConfig.username : "", proxyOptionsConfig.password != null ? proxyOptionsConfig.password : ""); } return proxyOptions; } catch (JsonProcessingException e) { logger.error("Failed to parse client telemetry proxy option config", e); } } return null; }
if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) ||
private ProxyOptions getProxyOptions() { String config = Configs.getClientTelemetryProxyOptionsConfig(); if (StringUtils.isNotEmpty(config)) { try { JsonProxyOptionsConfig proxyOptionsConfig = Utils.getSimpleObjectMapper().readValue(config, JsonProxyOptionsConfig.class); ProxyOptions.Type type = ProxyOptions.Type.valueOf(proxyOptionsConfig.type); if (type != ProxyOptions.Type.HTTP) { throw new IllegalArgumentException("Only http proxy type is supported."); } if (logger.isDebugEnabled()) { logger.debug( "Enable proxy with type {}, host {}, port {}, userName {}, password length {}", type, proxyOptionsConfig.host, proxyOptionsConfig.port, proxyOptionsConfig.username, proxyOptionsConfig.password != null ? proxyOptionsConfig.password.length() : -1 ); } ProxyOptions proxyOptions = new ProxyOptions( type, new InetSocketAddress(proxyOptionsConfig.host, proxyOptionsConfig.port)); if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) || !Strings.isNullOrEmpty(proxyOptionsConfig.password)) { proxyOptions.setCredentials( proxyOptionsConfig.username != null ? proxyOptionsConfig.username : "", proxyOptionsConfig.password != null ? proxyOptionsConfig.password : ""); } return proxyOptions; } catch (JsonProcessingException e) { logger.error("Failed to parse client telemetry proxy option config", e); } } return null; }
class ClientTelemetryConfig { private static Logger logger = LoggerFactory.getLogger(ClientTelemetryConfig.class); private static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; private boolean clientTelemetryEnabled; private final Duration httpNetworkRequestTimeout; private final int maxConnectionPoolSize; private final Duration idleHttpConnectionTimeout; private final ProxyOptions proxy; public ClientTelemetryConfig() { this.clientTelemetryEnabled = DEFAULT_CLIENT_TELEMETRY_ENABLED; this.httpNetworkRequestTimeout = DEFAULT_NETWORK_REQUEST_TIMEOUT; this.maxConnectionPoolSize = DEFAULT_MAX_CONNECTION_POOL_SIZE; this.idleHttpConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT; this.proxy = this.getProxyOptions(); } public static ClientTelemetryConfig getDefaultConfig() { return new ClientTelemetryConfig(); } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public boolean isClientTelemetryEnabled() { return this.clientTelemetryEnabled; } public Duration getHttpNetworkRequestTimeout() { return this.httpNetworkRequestTimeout; } public int getMaxConnectionPoolSize() { return this.maxConnectionPoolSize; } public Duration getIdleHttpConnectionTimeout() { return this.idleHttpConnectionTimeout; } public ProxyOptions getProxy() { return this.proxy; } private static class JsonProxyOptionsConfig { @JsonProperty private String host; @JsonProperty private int port; @JsonProperty private String type; @JsonProperty private String username; @JsonProperty private String password; private JsonProxyOptionsConfig() {} private JsonProxyOptionsConfig(String host, int port, String type, String username, String password) { this.host = host; this.port = port; this.type = 
type; this.username = username; this.password = password; } } }
class ClientTelemetryConfig { private static Logger logger = LoggerFactory.getLogger(ClientTelemetryConfig.class); private static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; private boolean clientTelemetryEnabled; private final Duration httpNetworkRequestTimeout; private final int maxConnectionPoolSize; private final Duration idleHttpConnectionTimeout; private final ProxyOptions proxy; public ClientTelemetryConfig() { this.clientTelemetryEnabled = DEFAULT_CLIENT_TELEMETRY_ENABLED; this.httpNetworkRequestTimeout = DEFAULT_NETWORK_REQUEST_TIMEOUT; this.maxConnectionPoolSize = DEFAULT_MAX_CONNECTION_POOL_SIZE; this.idleHttpConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT; this.proxy = this.getProxyOptions(); } public static ClientTelemetryConfig getDefaultConfig() { return new ClientTelemetryConfig(); } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public boolean isClientTelemetryEnabled() { return this.clientTelemetryEnabled; } public Duration getHttpNetworkRequestTimeout() { return this.httpNetworkRequestTimeout; } public int getMaxConnectionPoolSize() { return this.maxConnectionPoolSize; } public Duration getIdleHttpConnectionTimeout() { return this.idleHttpConnectionTimeout; } public ProxyOptions getProxy() { return this.proxy; } private static class JsonProxyOptionsConfig { @JsonProperty private String host; @JsonProperty private int port; @JsonProperty private String type; @JsonProperty private String username; @JsonProperty private String password; private JsonProxyOptionsConfig() {} private JsonProxyOptionsConfig(String host, int port, String type, String username, String password) { this.host = host; this.port = port; this.type = 
type; this.username = username; this.password = password; } } }
If both are empty that means we don't need to provide credentials
private ProxyOptions getProxyOptions() { String config = Configs.getClientTelemetryProxyOptionsConfig(); if (StringUtils.isNotEmpty(config)) { try { JsonProxyOptionsConfig proxyOptionsConfig = Utils.getSimpleObjectMapper().readValue(config, JsonProxyOptionsConfig.class); ProxyOptions.Type type = ProxyOptions.Type.valueOf(proxyOptionsConfig.type); if (type != ProxyOptions.Type.HTTP) { throw new IllegalArgumentException("Only http proxy type is supported."); } if (logger.isDebugEnabled()) { logger.debug( "Enable proxy with type {}, host {}, port {}, userName {}, password length {}", type, proxyOptionsConfig.host, proxyOptionsConfig.port, proxyOptionsConfig.username, proxyOptionsConfig.password != null ? proxyOptionsConfig.password.length() : -1 ); } ProxyOptions proxyOptions = new ProxyOptions( type, new InetSocketAddress(proxyOptionsConfig.host, proxyOptionsConfig.port)); if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) || !Strings.isNullOrEmpty(proxyOptionsConfig.password)) { proxyOptions.setCredentials( proxyOptionsConfig.username != null ? proxyOptionsConfig.username : "", proxyOptionsConfig.password != null ? proxyOptionsConfig.password : ""); } return proxyOptions; } catch (JsonProcessingException e) { logger.error("Failed to parse client telemetry proxy option config", e); } } return null; }
if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) ||
private ProxyOptions getProxyOptions() { String config = Configs.getClientTelemetryProxyOptionsConfig(); if (StringUtils.isNotEmpty(config)) { try { JsonProxyOptionsConfig proxyOptionsConfig = Utils.getSimpleObjectMapper().readValue(config, JsonProxyOptionsConfig.class); ProxyOptions.Type type = ProxyOptions.Type.valueOf(proxyOptionsConfig.type); if (type != ProxyOptions.Type.HTTP) { throw new IllegalArgumentException("Only http proxy type is supported."); } if (logger.isDebugEnabled()) { logger.debug( "Enable proxy with type {}, host {}, port {}, userName {}, password length {}", type, proxyOptionsConfig.host, proxyOptionsConfig.port, proxyOptionsConfig.username, proxyOptionsConfig.password != null ? proxyOptionsConfig.password.length() : -1 ); } ProxyOptions proxyOptions = new ProxyOptions( type, new InetSocketAddress(proxyOptionsConfig.host, proxyOptionsConfig.port)); if (!Strings.isNullOrEmpty(proxyOptionsConfig.username) || !Strings.isNullOrEmpty(proxyOptionsConfig.password)) { proxyOptions.setCredentials( proxyOptionsConfig.username != null ? proxyOptionsConfig.username : "", proxyOptionsConfig.password != null ? proxyOptionsConfig.password : ""); } return proxyOptions; } catch (JsonProcessingException e) { logger.error("Failed to parse client telemetry proxy option config", e); } } return null; }
class ClientTelemetryConfig { private static Logger logger = LoggerFactory.getLogger(ClientTelemetryConfig.class); private static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; private boolean clientTelemetryEnabled; private final Duration httpNetworkRequestTimeout; private final int maxConnectionPoolSize; private final Duration idleHttpConnectionTimeout; private final ProxyOptions proxy; public ClientTelemetryConfig() { this.clientTelemetryEnabled = DEFAULT_CLIENT_TELEMETRY_ENABLED; this.httpNetworkRequestTimeout = DEFAULT_NETWORK_REQUEST_TIMEOUT; this.maxConnectionPoolSize = DEFAULT_MAX_CONNECTION_POOL_SIZE; this.idleHttpConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT; this.proxy = this.getProxyOptions(); } public static ClientTelemetryConfig getDefaultConfig() { return new ClientTelemetryConfig(); } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public boolean isClientTelemetryEnabled() { return this.clientTelemetryEnabled; } public Duration getHttpNetworkRequestTimeout() { return this.httpNetworkRequestTimeout; } public int getMaxConnectionPoolSize() { return this.maxConnectionPoolSize; } public Duration getIdleHttpConnectionTimeout() { return this.idleHttpConnectionTimeout; } public ProxyOptions getProxy() { return this.proxy; } private static class JsonProxyOptionsConfig { @JsonProperty private String host; @JsonProperty private int port; @JsonProperty private String type; @JsonProperty private String username; @JsonProperty private String password; private JsonProxyOptionsConfig() {} private JsonProxyOptionsConfig(String host, int port, String type, String username, String password) { this.host = host; this.port = port; this.type = 
type; this.username = username; this.password = password; } } }
class ClientTelemetryConfig { private static Logger logger = LoggerFactory.getLogger(ClientTelemetryConfig.class); private static boolean DEFAULT_CLIENT_TELEMETRY_ENABLED = false; private static final Duration DEFAULT_NETWORK_REQUEST_TIMEOUT = Duration.ofSeconds(60); private static final Duration DEFAULT_IDLE_CONNECTION_TIMEOUT = Duration.ofSeconds(60); private static final int DEFAULT_MAX_CONNECTION_POOL_SIZE = 1000; private boolean clientTelemetryEnabled; private final Duration httpNetworkRequestTimeout; private final int maxConnectionPoolSize; private final Duration idleHttpConnectionTimeout; private final ProxyOptions proxy; public ClientTelemetryConfig() { this.clientTelemetryEnabled = DEFAULT_CLIENT_TELEMETRY_ENABLED; this.httpNetworkRequestTimeout = DEFAULT_NETWORK_REQUEST_TIMEOUT; this.maxConnectionPoolSize = DEFAULT_MAX_CONNECTION_POOL_SIZE; this.idleHttpConnectionTimeout = DEFAULT_IDLE_CONNECTION_TIMEOUT; this.proxy = this.getProxyOptions(); } public static ClientTelemetryConfig getDefaultConfig() { return new ClientTelemetryConfig(); } public void setClientTelemetryEnabled(boolean clientTelemetryEnabled) { this.clientTelemetryEnabled = clientTelemetryEnabled; } public boolean isClientTelemetryEnabled() { return this.clientTelemetryEnabled; } public Duration getHttpNetworkRequestTimeout() { return this.httpNetworkRequestTimeout; } public int getMaxConnectionPoolSize() { return this.maxConnectionPoolSize; } public Duration getIdleHttpConnectionTimeout() { return this.idleHttpConnectionTimeout; } public ProxyOptions getProxy() { return this.proxy; } private static class JsonProxyOptionsConfig { @JsonProperty private String host; @JsonProperty private int port; @JsonProperty private String type; @JsonProperty private String username; @JsonProperty private String password; private JsonProxyOptionsConfig() {} private JsonProxyOptionsConfig(String host, int port, String type, String username, String password) { this.host = host; this.port = port; this.type = 
type; this.username = username; this.password = password; } } }
I believe it should be `distinctUntilChanged()`. `distinct` keeps a set, so if you see INACTIVE -> ACTIVE -> INACTIVE states, it'll only output INACTIVE and ACTIVE but not the last state.
/**
 * Gets the stream of AMQP endpoint states for this receive link, terminating once the
 * link begins closing.
 *
 * @return a {@link Flux} of endpoint states that completes when the terminate signal fires.
 */
public Flux<AmqpEndpointState> getEndpointStates() {
    return endpointStates
        // BUG FIX: distinct() tracks every state ever seen, so a state revisited later
        // (e.g. INACTIVE -> ACTIVE -> INACTIVE) would never be re-emitted. Downstream
        // consumers would miss the final INACTIVE transition. distinctUntilChanged()
        // suppresses only consecutive duplicates, which is the intended behavior.
        .distinctUntilChanged()
        .takeUntilOther(this.terminateEndpointStates.asMono());
}
.distinct()
/**
 * Gets the stream of AMQP endpoint states for this receive link, terminating once the
 * link begins closing.
 *
 * @return a {@link Flux} of endpoint states that completes when the terminate signal fires.
 */
public Flux<AmqpEndpointState> getEndpointStates() {
    return endpointStates
        // distinctUntilChanged() suppresses only consecutive duplicates; unlike distinct(),
        // a return to a previously seen state (INACTIVE -> ACTIVE -> INACTIVE) is still emitted.
        .distinctUntilChanged()
        // Complete the stream when close is initiated and the terminate sink fires.
        .takeUntilOther(this.terminateEndpointStates.asMono());
}
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandler handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandler handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); this.logger = new ClientLogger(ReactorReceiver.class, loggingContext); this.messagesProcessor = this.handler.getDeliveredMessages() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); 
if (credits != null && credits > 0) { logger.atInfo() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Authorization completed."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an 
exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * Ref:https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } /** * Beings the client side close by initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. 
*/ private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; return Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); } /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. 
* </p> */ private void terminateEndpointState() { this.terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. * </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); receiver.free(); } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandler handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandler handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorReceiver.class, loggingContext); this.messagesProcessor = this.handler.getDeliveredMessages() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = 
creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atInfo() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an 
exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } /** * Beings the client side close by initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. 
*/ private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; return Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); } /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. 
* </p> */ private void terminateEndpointState() { this.terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. * </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); receiver.free(); } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
Ah, I see — good point! I will switch to `distinctUntilChanged()`.
/**
 * Gets the stream of AMQP endpoint states for this receive link, terminating once the
 * link begins closing.
 *
 * @return a {@link Flux} of endpoint states that completes when the terminate signal fires.
 */
public Flux<AmqpEndpointState> getEndpointStates() {
    return endpointStates
        // BUG FIX: distinct() tracks every state ever seen, so a state revisited later
        // (e.g. INACTIVE -> ACTIVE -> INACTIVE) would never be re-emitted. Downstream
        // consumers would miss the final INACTIVE transition. distinctUntilChanged()
        // suppresses only consecutive duplicates, which is the intended behavior.
        .distinctUntilChanged()
        .takeUntilOther(this.terminateEndpointStates.asMono());
}
.distinct()
/**
 * Gets the stream of AMQP endpoint states for this receive link, terminating once the
 * link begins closing.
 *
 * @return a {@link Flux} of endpoint states that completes when the terminate signal fires.
 */
public Flux<AmqpEndpointState> getEndpointStates() {
    return endpointStates
        // distinctUntilChanged() suppresses only consecutive duplicates; unlike distinct(),
        // a return to a previously seen state (INACTIVE -> ACTIVE -> INACTIVE) is still emitted.
        .distinctUntilChanged()
        // Complete the stream when close is initiated and the terminate sink fires.
        .takeUntilOther(this.terminateEndpointStates.asMono());
}
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandler handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandler handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); this.logger = new ClientLogger(ReactorReceiver.class, loggingContext); this.messagesProcessor = this.handler.getDeliveredMessages() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = creditSupplier.get(); final Integer credits = supplier.get(); 
if (credits != null && credits > 0) { logger.atInfo() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Authorization completed."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an 
exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * Ref:https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .addKeyValue(ENTITY_PATH_KEY, entityPath) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } /** * Beings the client side close by initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. 
*/ private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; return Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); } /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. 
* </p> */ private void terminateEndpointState() { this.terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. * </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); receiver.free(); } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
class ReactorReceiver implements AmqpReceiveLink, AsyncCloseable, AutoCloseable { private final String entityPath; private final Receiver receiver; private final ReceiveLinkHandler handler; private final TokenManager tokenManager; private final ReactorDispatcher dispatcher; private final Disposable subscriptions; private final AtomicBoolean isDisposed = new AtomicBoolean(); private final Sinks.Empty<Void> isClosedMono = Sinks.empty(); private final AtomicBoolean isCompleteCloseCalled = new AtomicBoolean(); private final Flux<Message> messagesProcessor; private final AmqpRetryOptions retryOptions; private final ClientLogger logger; private final Flux<AmqpEndpointState> endpointStates; private final Sinks.Empty<AmqpEndpointState> terminateEndpointStates = Sinks.empty(); private final AtomicReference<Supplier<Integer>> creditSupplier = new AtomicReference<>(); protected ReactorReceiver(AmqpConnection amqpConnection, String entityPath, Receiver receiver, ReceiveLinkHandler handler, TokenManager tokenManager, ReactorDispatcher dispatcher, AmqpRetryOptions retryOptions) { this.entityPath = entityPath; this.receiver = receiver; this.handler = handler; this.tokenManager = tokenManager; this.dispatcher = dispatcher; Map<String, Object> loggingContext = createContextWithConnectionId(handler.getConnectionId()); loggingContext.put(LINK_NAME_KEY, this.handler.getLinkName()); loggingContext.put(ENTITY_PATH_KEY, entityPath); this.logger = new ClientLogger(ReactorReceiver.class, loggingContext); this.messagesProcessor = this.handler.getDeliveredMessages() .flatMap(delivery -> { return Mono.create(sink -> { try { this.dispatcher.invoke(() -> { if (isDisposed()) { sink.error(new IllegalStateException( "Cannot decode delivery when ReactorReceiver instance is closed.")); return; } final Message message = decodeDelivery(delivery); final int creditsLeft = receiver.getRemoteCredit(); if (creditsLeft > 0) { sink.success(message); return; } final Supplier<Integer> supplier = 
creditSupplier.get(); final Integer credits = supplier.get(); if (credits != null && credits > 0) { logger.atInfo() .addKeyValue("credits", credits) .log("Adding credits."); receiver.flow(credits); } else { logger.atVerbose() .addKeyValue("credits", credits) .log("There are no credits to add."); } sink.success(message); }); } catch (IOException | RejectedExecutionException e) { sink.error(e); } }); }, 1); this.retryOptions = retryOptions; this.endpointStates = this.handler.getEndpointStates() .map(state -> { logger.atVerbose() .log("State {}", state); return AmqpEndpointStateUtil.getConnectionState(state); }) .doOnError(error -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed. Dropping error." : "Freeing resources due to error."; logger.atInfo() .log(message); completeClose(); }) .doOnComplete(() -> { final String message = isDisposed.getAndSet(true) ? "This was already disposed." : "Freeing resources."; logger.atVerbose() .log(message); completeClose(); }) .cache(1); this.subscriptions = Disposables.composite( this.endpointStates.subscribe(), this.tokenManager.getAuthorizationResults() .onErrorResume(error -> { final Mono<Void> operation = closeAsync("Token renewal failure. Disposing receive link.", new ErrorCondition(Symbol.getSymbol(AmqpErrorCondition.NOT_ALLOWED.getErrorCondition()), error.getMessage())); return operation.then(Mono.empty()); }).subscribe(response -> logger.atVerbose() .addKeyValue("response", response) .log("Token refreshed."), error -> { }, () -> { logger.atVerbose() .log("Authorization completed."); closeAsync("Authorization completed. 
Disposing.", null).subscribe(); }), amqpConnection.getShutdownSignals().flatMap(signal -> { logger.verbose("Shutdown signal received."); return closeAsync("Connection shutdown.", null); }).subscribe()); } @Override @Override public Flux<Message> receive() { return messagesProcessor; } @Override public Mono<Void> addCredits(int credits) { if (isDisposed()) { return monoError(logger, new IllegalStateException("Cannot add credits to closed link: " + getLinkName())); } return Mono.create(sink -> { try { dispatcher.invoke(() -> { receiver.flow(credits); sink.success(); }); } catch (IOException e) { sink.error(new UncheckedIOException(String.format( "connectionId[%s] linkName[%s] Unable to schedule work to add more credits.", handler.getConnectionId(), getLinkName()), e)); } catch (RejectedExecutionException e) { sink.error(e); } }); } @Override public int getCredits() { return receiver.getRemoteCredit(); } @Override public void setEmptyCreditListener(Supplier<Integer> creditSupplier) { Objects.requireNonNull(creditSupplier, "'creditSupplier' cannot be null."); this.creditSupplier.set(creditSupplier); } @Override public String getLinkName() { return handler.getLinkName(); } @Override public String getEntityPath() { return entityPath; } @Override public String getHostname() { return handler.getHostname(); } @Override public boolean isDisposed() { return isDisposed.get(); } @Override public void dispose() { close(); } @Override public void close() { closeAsync().block(retryOptions.getTryTimeout()); } @Override public Mono<Void> closeAsync() { return closeAsync("User invoked close operation.", null); } protected Message decodeDelivery(Delivery delivery) { final int messageSize = delivery.pending(); final byte[] buffer = new byte[messageSize]; final int read = receiver.recv(buffer, 0, messageSize); receiver.advance(); final Message message = Proton.message(); message.decode(buffer, 0, read); delivery.settle(); return message; } /** * Disposes of the receiver when an 
exception is encountered. * <p> * While {@link ReactorReceiver * contract, this API performs the same disposal with additional * contextual information. For example, the context may indicate if the resource needs to be disposed of * internally when there is an error in the link, session or connection. * </p> * * <p> * Closing ReactorReceiver involves 3 stages, running in following order - * <ul> * <li>local-close (client to broker) via beginClose() </li> * <li>remote-close ack (broker to client)</li> * <li>disposal of ReactorReceiver resources via completeClose()</li> * </ul> * @link <a href="https: * * @param message Message to log. * @param errorCondition Error condition associated with close operation. */ protected Mono<Void> closeAsync(String message, ErrorCondition errorCondition) { if (isDisposed.getAndSet(true)) { return getIsClosedMono(); } addErrorCondition(logger.atVerbose(), errorCondition) .log("Setting error condition and disposing. {}", message); return beginClose(errorCondition) .flatMap(localCloseScheduled -> { if (localCloseScheduled) { return timeoutRemoteCloseAck(); } else { return Mono.empty(); } }) .publishOn(Schedulers.boundedElastic()); } /** * Gets the Mono that signals completion when the disposal/closing of ReactorReceiver is completed. * * @return the disposal/closing completion Mono. */ protected Mono<Void> getIsClosedMono() { return isClosedMono.asMono().publishOn(Schedulers.boundedElastic()); } /** * Beings the client side close by initiating local-close on underlying receiver. * * @param errorCondition Error condition associated with close operation. * @return a {@link Mono} when subscribed attempt to initiate local-close, emitting {@code true} * if local-close is scheduled on the dispatcher, emits {@code false} if unable to schedule * local-close that lead to manual close. 
*/ private Mono<Boolean> beginClose(ErrorCondition errorCondition) { final Runnable localClose = () -> { if (receiver.getLocalState() != EndpointState.CLOSED) { receiver.close(); if (receiver.getCondition() == null) { receiver.setCondition(errorCondition); } } }; return Mono.create(sink -> { boolean localCloseScheduled = false; try { dispatcher.invoke(localClose); localCloseScheduled = true; } catch (IOException e) { logger.warning("IO sink was closed when scheduling work. Manually invoking and completing close.", e); localClose.run(); terminateEndpointState(); completeClose(); } catch (RejectedExecutionException e) { logger.info("RejectedExecutionException when scheduling on ReactorDispatcher. Manually invoking and completing close."); localClose.run(); terminateEndpointState(); completeClose(); } finally { sink.success(localCloseScheduled); } }); } /** * Apply timeout on remote-close ack. If timeout happens, i.e., if remote-close ack doesn't arrive within * the timeout duration, then terminate the Flux returned by getEndpointStates() and complete close. * * a {@link Mono} that registers remote-close ack timeout based close cleanup. */ private Mono<Void> timeoutRemoteCloseAck() { return isClosedMono.asMono() .timeout(retryOptions.getTryTimeout()) .onErrorResume(error -> { if (error instanceof TimeoutException) { logger.info("Timeout waiting for RemoteClose. Manually terminating EndpointStates and completing close."); terminateEndpointState(); completeClose(); } return Mono.empty(); }) .subscribeOn(Schedulers.boundedElastic()); } /** * Terminate the Flux returned by the getEndpointStates() API. * * <p> * The termination of Flux returned by getEndpointStates() is the signal that "AmqpReceiveLinkProcessor" * uses to either terminate its downstream or obtain a new ReactorReceiver to continue delivering events * downstream. 
* </p> */ private void terminateEndpointState() { this.terminateEndpointStates.emitEmpty((signalType, emitResult) -> { addSignalTypeAndResult(logger.atVerbose(), signalType, emitResult) .log("Could not emit EndpointStates termination."); return false; }); } /** * Completes the closing of the underlying receiver, which includes disposing of subscriptions, * closing of token manager, and releasing of protonJ resources. * <p> * The completeClose invoked in 3 cases - when the broker ack for beginClose (i.e. ack via * remote-close frame), if the broker ack for beginClose never comes through within timeout, * if the client fails to run beginClose. * </p> */ private void completeClose() { if (isCompleteCloseCalled.getAndSet(true)) { return; } isClosedMono.emitEmpty((signalType, result) -> { addSignalTypeAndResult(logger.atWarning(), signalType, result) .log("Unable to emit shutdown signal."); return false; }); subscriptions.dispose(); if (tokenManager != null) { tokenManager.close(); } handler.close(); receiver.free(); } @Override public String toString() { return String.format("connectionId: [%s] entity path: [%s] linkName: [%s]", receiver.getName(), entityPath, getLinkName()); } }
```suggestion throw new FileNotFoundException("Blob or container does not exist."); ```
public InputStream getInputStream() throws IOException { try { return this.blockBlobClient.openInputStream(); } catch (BlobStorageException e) { if (e.getErrorCode() == BlobErrorCode.CONTAINER_NOT_FOUND || e.getErrorCode() == BlobErrorCode.BLOB_NOT_FOUND) { throw new FileNotFoundException("Blob or container not existed."); } else { throw new IOException(MSG_FAIL_OPEN_INPUT, e); } } }
throw new FileNotFoundException("Blob or container not existed.");
public InputStream getInputStream() throws IOException { try { return this.blockBlobClient.openInputStream(); } catch (BlobStorageException e) { if (e.getErrorCode() == BlobErrorCode.CONTAINER_NOT_FOUND || e.getErrorCode() == BlobErrorCode.BLOB_NOT_FOUND) { throw new FileNotFoundException("Blob or container does not exist."); } else { throw new IOException(MSG_FAIL_OPEN_INPUT, e); } } }
class StorageBlobResource extends AzureStorageResource { private static final Logger LOGGER = LoggerFactory.getLogger(StorageBlobResource.class); private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of cloud blob"; private static final String MSG_FAIL_OPEN_INPUT = "Failed to open input stream of blob"; private final BlobServiceClient blobServiceClient; private final String location; private final BlobContainerClient blobContainerClient; private final BlockBlobClient blockBlobClient; private final boolean autoCreateFiles; private BlobProperties blobProperties; private final String snapshot; private final String versionId; private final String contentType; /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location) { this(blobServiceClient, location, true); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles) { this(blobServiceClient, location, autoCreateFiles, null, null, null); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param snapshot the snapshot name * @param versionId the version id * @param contentType the content type */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles, String snapshot, String versionId, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles == null ? 
isAutoCreateFiles(location) : autoCreateFiles; this.blobServiceClient = blobServiceClient; this.location = location; this.snapshot = snapshot; this.versionId = versionId; this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); Assert.isTrue(!(StringUtils.hasText(versionId) && StringUtils.hasText(snapshot)), "'versionId' and 'snapshot' can not be both set"); this.blobContainerClient = blobServiceClient.getBlobContainerClient(getContainerName(location)); BlobClient blobClient = blobContainerClient.getBlobClient(getFilename(location)); if (StringUtils.hasText(versionId)) { blobClient = blobClient.getVersionClient(versionId); } if (StringUtils.hasText(snapshot)) { blobClient = blobClient.getSnapshotClient(snapshot); } this.blockBlobClient = blobClient.getBlockBlobClient(); } private boolean isAutoCreateFiles(String location) { return true; } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws IOException If a storage service error occurred or blob not found. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.blobContainerClient.createIfNotExists(); } BlockBlobOutputStreamOptions options = new BlockBlobOutputStreamOptions(); if (StringUtils.hasText(contentType)) { BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders(); blobHttpHeaders.setContentType(contentType); options.setHeaders(blobHttpHeaders); } return this.blockBlobClient.getBlobOutputStream(options); } catch (BlobStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return true if the blob exists, false if it doesn't */ @Override public boolean exists() { return blockBlobClient.exists(); } /** * Gets the URL of the blob represented by this client. 
* * @return the URL. */ @Override public URL getURL() throws IOException { return new URL(this.blockBlobClient.getBlobUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } private BlobProperties getBlobProperties() { if (blobProperties == null) { blobProperties = blockBlobClient.getProperties(); } return blobProperties; } /** * @return the size of the blob in bytes */ @Override public long contentLength() { return getBlobProperties().getBlobSize(); } /** * @return the time when the blob was last modified */ @Override public long lastModified() { return getBlobProperties().getLastModified().toEpochSecond(); } @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageBlobResource(this.blobServiceClient, newLocation, autoCreateFiles); } /** * @return The decoded name of the blob. */ @Override public String getFilename() { return this.blockBlobClient.getBlobName(); } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { StringBuilder sb = new StringBuilder(); sb.append("Azure storage account blob resource [container='"); sb.append(this.blockBlobClient.getContainerName()); sb.append("', blob='"); sb.append(blockBlobClient.getBlobName()); sb.append("'"); if (versionId != null) { sb.append(", versionId='").append(versionId).append("'"); } if (snapshot != null) { sb.append(", snapshot='").append(snapshot).append("'"); } sb.append("]"); return sb.toString(); } @Override @Override StorageType getStorageType() { return StorageType.BLOB; } }
class StorageBlobResource extends AzureStorageResource { private static final Logger LOGGER = LoggerFactory.getLogger(StorageBlobResource.class); private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of cloud blob"; private static final String MSG_FAIL_OPEN_INPUT = "Failed to open input stream of blob"; private final BlobServiceClient blobServiceClient; private final String location; private final BlobContainerClient blobContainerClient; private final BlockBlobClient blockBlobClient; private final boolean autoCreateFiles; private BlobProperties blobProperties; private final String snapshot; private final String versionId; private final String contentType; /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location) { this(blobServiceClient, location, true); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles) { this(blobServiceClient, location, autoCreateFiles, null, null, null); } /** * Creates a new instance of {@link StorageBlobResource}. * * @param blobServiceClient the BlobServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param snapshot the snapshot name * @param versionId the version id * @param contentType the content type */ public StorageBlobResource(BlobServiceClient blobServiceClient, String location, Boolean autoCreateFiles, String snapshot, String versionId, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles == null ? 
isAutoCreateFiles(location) : autoCreateFiles; this.blobServiceClient = blobServiceClient; this.location = location; this.snapshot = snapshot; this.versionId = versionId; this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); Assert.isTrue(!(StringUtils.hasText(versionId) && StringUtils.hasText(snapshot)), "'versionId' and 'snapshot' can not be both set"); this.blobContainerClient = blobServiceClient.getBlobContainerClient(getContainerName(location)); BlobClient blobClient = blobContainerClient.getBlobClient(getFilename(location)); if (StringUtils.hasText(versionId)) { blobClient = blobClient.getVersionClient(versionId); } if (StringUtils.hasText(snapshot)) { blobClient = blobClient.getSnapshotClient(snapshot); } this.blockBlobClient = blobClient.getBlockBlobClient(); } private boolean isAutoCreateFiles(String location) { return true; } /** * Creates and opens an output stream to write data to the block blob. If the blob already exists on the service, it * will be overwritten. * * @return A {@link BlobOutputStream} object used to write data to the blob. * @throws IOException If a storage service error occurred or blob not found. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.blobContainerClient.createIfNotExists(); } BlockBlobOutputStreamOptions options = new BlockBlobOutputStreamOptions(); if (StringUtils.hasText(contentType)) { BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders(); blobHttpHeaders.setContentType(contentType); options.setHeaders(blobHttpHeaders); } return this.blockBlobClient.getBlobOutputStream(options); } catch (BlobStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Gets if the blob this client represents exists in the cloud. * * @return true if the blob exists, false if it doesn't */ @Override public boolean exists() { return blockBlobClient.exists(); } /** * Gets the URL of the blob represented by this client. 
* * @return the URL. */ @Override public URL getURL() throws IOException { return new URL(this.blockBlobClient.getBlobUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } private BlobProperties getBlobProperties() { if (blobProperties == null) { blobProperties = blockBlobClient.getProperties(); } return blobProperties; } /** * @return the size of the blob in bytes */ @Override public long contentLength() { return getBlobProperties().getBlobSize(); } /** * @return the time when the blob was last modified */ @Override public long lastModified() { return getBlobProperties().getLastModified().toEpochSecond(); } @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageBlobResource(this.blobServiceClient, newLocation, autoCreateFiles); } /** * @return The decoded name of the blob. */ @Override public String getFilename() { return this.blockBlobClient.getBlobName(); } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { StringBuilder sb = new StringBuilder(); sb.append("Azure storage account blob resource [container='"); sb.append(this.blockBlobClient.getContainerName()); sb.append("', blob='"); sb.append(blockBlobClient.getBlobName()); sb.append("'"); if (versionId != null) { sb.append(", versionId='").append(versionId).append("'"); } if (snapshot != null) { sb.append(", snapshot='").append(snapshot).append("'"); } sb.append("]"); return sb.toString(); } @Override @Override StorageType getStorageType() { return StorageType.BLOB; } }
```suggestion throw new FileNotFoundException("Share or file does not exist."); ```
public InputStream getInputStream() throws IOException { try { return this.shareFileClient.openInputStream(); } catch (ShareStorageException e) { if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) { throw new FileNotFoundException("Share or file not existed"); } else { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } }
throw new FileNotFoundException("Share or file not existed");
public InputStream getInputStream() throws IOException { try { return this.shareFileClient.openInputStream(); } catch (ShareStorageException e) { if (e.getErrorCode() == ShareErrorCode.SHARE_NOT_FOUND || e.getErrorCode() == ShareErrorCode.RESOURCE_NOT_FOUND) { throw new FileNotFoundException("Share or file does not exist"); } else { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override @Override StorageType getStorageType() { return StorageType.FILE; } private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } } }
class StorageFileResource extends AzureStorageResource { private static final String MSG_FAIL_OPEN_OUTPUT = "Failed to open output stream of file"; private final ShareServiceClient shareServiceClient; private final ShareClient shareClient; private final ShareFileClient shareFileClient; private final String location; private final boolean autoCreateFiles; private final String contentType; /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location */ public StorageFileResource(ShareServiceClient shareServiceClient, String location) { this(shareServiceClient, location, false); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles) { this(shareServiceClient, location, autoCreateFiles, null); } /** * Creates a new instance of {@link StorageFileResource}. * * @param shareServiceClient the ShareServiceClient * @param location the location * @param autoCreateFiles whether to automatically create files * @param contentType the content type */ public StorageFileResource(ShareServiceClient shareServiceClient, String location, boolean autoCreateFiles, String contentType) { assertIsAzureStorageLocation(location); this.autoCreateFiles = autoCreateFiles; this.location = location; this.shareServiceClient = shareServiceClient; this.shareClient = shareServiceClient.getShareClient(getContainerName(location)); this.shareFileClient = shareClient.getFileClient(getFilename(location)); this.contentType = StringUtils.hasText(contentType) ? contentType : getContentType(location); } /** * Checks whether an Azure Storage File can be opened, * if the file is not existed, and autoCreateFiles==true, * it will create the file on Azure Storage. 
* @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws IOException when fail to open the output stream. */ @Override public OutputStream getOutputStream() throws IOException { try { if (this.autoCreateFiles) { this.shareClient.createIfNotExists(); this.create(); } return this.shareFileClient.getFileOutputStream(); } catch (ShareStorageException e) { throw new IOException(MSG_FAIL_OPEN_OUTPUT, e); } } /** * Determines if the file this client represents exists in the cloud. * * @return Flag indicating existence of the file. */ @Override public boolean exists() { return this.shareFileClient.exists(); } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ @Override public URL getURL() throws IOException { return new URL(this.shareFileClient.getFileUrl()); } /** * This implementation throws a FileNotFoundException, assuming * that the resource cannot be resolved to an absolute file path. */ @Override public File getFile() { throw new UnsupportedOperationException(getDescription() + " cannot be resolved to absolute file path"); } /** * @return The number of bytes present in the response body. */ @Override public long contentLength() { return this.shareFileClient.getProperties().getContentLength(); } /** * * @return Last time the directory was modified. */ @Override public long lastModified() { return this.shareFileClient.getProperties().getLastModified().toEpochSecond() * 1000; } /** * Create relative resource from current location. * * @param relativePath the relative path. * @return StorageFileResource with relative path from current location. */ @Override public Resource createRelative(String relativePath) { String newLocation = this.location + "/" + relativePath; return new StorageFileResource(this.shareServiceClient, newLocation, autoCreateFiles); } /** * @return The name of the file. 
*/ @Override public String getFilename() { final String[] split = this.shareFileClient.getFilePath().split("/"); return split[split.length - 1]; } /** * @return a description for this resource, * to be used for error output when working with the resource. */ @Override public String getDescription() { return String.format("Azure storage account file resource [container='%s', file='%s']", this.shareFileClient.getShareName(), this.getFilename()); } @Override @Override StorageType getStorageType() { return StorageType.FILE; } private void create() throws ShareStorageException { if (!this.exists()) { ShareFileHttpHeaders header = null; if (StringUtils.hasText(contentType)) { header = new ShareFileHttpHeaders(); header.setContentType(contentType); } this.shareFileClient.createWithResponse(1024, header, null, null, null, null, Context.NONE) .getValue(); } } }