comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
listDeletedCertificatesNextPage doesn't need the parameter `includePending`?
PagedFlux<DeletedCertificate> listDeletedCertificates(Boolean includePending, Context context) { return new PagedFlux<>( () -> listDeletedCertificatesFirstPage(includePending, context), continuationToken -> listDeletedCertificatesNextPage(continuationToken, context)); }
continuationToken -> listDeletedCertificatesNextPage(continuationToken, context));
PagedFlux<DeletedCertificate> listDeletedCertificates(Boolean includePending, Context context) { return new PagedFlux<>( () -> listDeletedCertificatesFirstPage(includePending, context), continuationToken -> listDeletedCertificatesNextPage(continuationToken, context)); }
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. * @param tags The application specific metadata to set. 
* @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets a pending {@link CertificateOperation} from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Geta a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), null, createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. */ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to 
delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. 
* @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: status = LongRunningOperationStatus.fromString(certificateOperationResponse.getValue().getStatus(), true); break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } 
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets a pending {@link CertificateOperation} from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Get a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateOperation * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. */ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to 
delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
No — fetching a subsequent page only requires the continuation token returned by the previous response; the `includePending` filter applies solely to the first-page request, so `listDeletedCertificatesNextPage` does not need that parameter.
/**
 * Lists the deleted certificates in the vault as a {@link PagedFlux}.
 *
 * @param includePending whether certificates in a pending (non-completed) state are included;
 *                       only honored by the first-page request.
 * @param context additional context propagated through the HTTP pipeline.
 * @return a {@link PagedFlux} of {@link DeletedCertificate deleted certificates}.
 */
PagedFlux<DeletedCertificate> listDeletedCertificates(Boolean includePending, Context context) {
    // First page carries the filter; subsequent pages are driven purely by the
    // continuation token handed back in each response.
    return new PagedFlux<>(
        () -> listDeletedCertificatesFirstPage(includePending, context),
        nextPageLink -> listDeletedCertificatesNextPage(nextPageLink, context));
}
continuationToken -> listDeletedCertificatesNextPage(continuationToken, context));
/**
 * Lists deleted certificates from the vault.
 *
 * @param includePending whether to include certificates still in a pending state;
 *                       applied only when fetching the first page.
 * @param context additional context that is passed through the HTTP pipeline.
 * @return a {@link PagedFlux} emitting {@link DeletedCertificate} items page by page.
 */
PagedFlux<DeletedCertificate> listDeletedCertificates(Boolean includePending, Context context) {
    return new PagedFlux<>(
        // initial request — the only one that uses the includePending filter
        () -> listDeletedCertificatesFirstPage(includePending, context),
        // later pages need only the continuation token from the prior response
        continuationToken -> listDeletedCertificatesNextPage(continuationToken, context));
}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. * @param tags The application specific metadata to set. 
 * @throws ResourceModifiedException when invalid certificate policy configuration is provided.
 * @return A {@link PollerFlux} polling on the create certificate operation status.
 */
public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
    // Poll every second: activation issues the create request, the poll operation tracks the
    // service-side operation status, and the cancel/fetch callbacks delegate to the matching
    // service calls below.
    return new PollerFlux<>(Duration.ofSeconds(1),
        activationOperation(name, policy, enabled, tags),
        createPollOperation(name),
        cancelOperation(name),
        fetchResultOperation(name));
}

// Cancel callback for the poller: cancels the in-flight certificate operation on the service.
private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) {
    return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono);
}

// Activation callback: issues the initial create-certificate request and surfaces the
// resulting CertificateOperation to the poller.
private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
    return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context)
        .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue())));
}

// Final-result callback: fetches the created certificate ("" selects the latest version).
private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) {
    return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context)
        .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue())));
}

/**
 * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires
 * the certificates/create permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate
 * operation status.
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets a pending {@link CertificateOperation} from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Geta a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), null, createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. 
 * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) {
    try {
        // Empty version string requests the latest version of the certificate.
        return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}

/**
 * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the
 * returned certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. 
The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", 
certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. */ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to 
delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. 
* @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: status = LongRunningOperationStatus.fromString(certificateOperationResponse.getValue().getStatus(), true); break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } 
// Builds the create-certificate request body (policy + attributes + tags) and issues the service call.
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) {
    CertificateRequestParameters certificateRequestParameters = new CertificateRequestParameters()
        .certificatePolicy(new CertificatePolicyRequest(certificatePolicy))
        .certificateAttributes(new CertificateRequestAttributes().enabled(enabled))
        .tags(tags);
    return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context);
}

/**
 * Gets a pending {@link CertificateOperation} from the key vault. This operation requires the
 * certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Get a pending certificate operation. The {@link PollerFlux poller} allows users to automatically
 * poll on the certificate operation status. It is possible to monitor each intermediate poll response
 * during the poll operation.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateOperation}
 *
 * @param name The name of the certificate.
 * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist.
 * @return A {@link PollerFlux} polling on the certificate operation status.
 */
public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) {
    // No activation step (Mono.empty()): the operation already exists; we only poll it.
    return new PollerFlux<>(Duration.ofSeconds(1),
        (pollingContext) -> Mono.empty(),
        createPollOperation(name),
        cancelOperation(name),
        fetchResultOperation(name));
}

/**
 * Gets information about the latest version of the specified certificate. This operation requires the
 * certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate
 * details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate}
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
 * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) {
    try {
        // Empty version string selects the latest certificate version.
        return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets information about the latest version of the specified certificate. This operation requires the
 * certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate
 * details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse}
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
* @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultCertificateWithPolicy certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) {
    try {
        // Empty version string selects the latest certificate version.
        return withContext(context -> getCertificateWithResponse(name, "", context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Service-call implementation returning certificate with its policy; logs request, success and failure.
Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) {
    return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
        .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
}

// Service-call implementation for a specific certificate version (no policy in the response).
Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) {
    return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
        .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
}

/**
 * Gets information about the specified version of the specified certificate. This operation requires
 * the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate
 * details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse}
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
 * @return A {@link Mono} containing a {@link Response} whose value is the requested {@link KeyVaultCertificate certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) {
    try {
        // Normalize null version to "" so the service treats it as "latest".
        return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets information about the specified version of the specified certificate. This operation requires
 * the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the returned certificate
 * details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion}
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
 * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) {
    try {
        return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Updates the specified attributes associated with the specified certificate. The update operation
 * changes specified attributes of an existing stored certificate and attributes that are not specified
 * in the request are left unchanged. This operation requires the certificates/update permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in
 * the Azure Key Vault. Prints out the returned certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties}
 *
 * @param certificateProperties The {@link CertificateProperties} object with updated properties.
 * @throws NullPointerException if {@code certificate} is {@code null}.
 * @throws ResourceNotFoundException when a certificate with the given name doesn't exist in the key vault.
 * @throws HttpRequestException if the certificate name is an empty string.
 * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) {
    try {
        return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Updates the specified attributes associated with the specified certificate.
* The update operation changes specified attributes of an existing stored certificate and attributes
 * that are not specified in the request are left unchanged. This operation requires the
 * certificates/update permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the
 * Azure Key Vault. Prints out the returned certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse}
 *
 * @param certificateProperties The {@link CertificateProperties} object with updated properties.
 * @throws NullPointerException if {@code certificate} is {@code null}.
 * @throws ResourceNotFoundException when a certificate with the given name doesn't exist in the key vault.
 * @throws HttpRequestException if the certificate name is an empty string.
 * @return A {@link Mono} containing a {@link Response} whose value is the updated {@link KeyVaultCertificate certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) {
    try {
        return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Service-call implementation: only tags and attributes are sent; unspecified fields stay unchanged.
Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) {
    Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null.");
    CertificateUpdateParameters parameters = new CertificateUpdateParameters()
        .tags(certificateProperties.getTags())
        .certificateAttributes(new CertificateRequestAttributes(certificateProperties));
    return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateProperties.getName()))
        .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName()))
        .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error));
}

/**
 * Deletes a certificate from a specified key vault. All the versions of the certificate along with its
 * associated policy get deleted. If soft-delete is enabled on the key vault then the certificate is
 * placed in the deleted state and requires to be purged for permanent deletion else the certificate is
 * permanently deleted. The delete operation applies to any certificate stored in Azure Key Vault but it
 * cannot be applied to an individual version of a certificate. This operation requires the
 * certificates/delete permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a
 * response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate}
 *
 * @param name The name of the certificate to be deleted.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}.
*/ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. */ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to 
delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
* Prints out the deleted certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse}
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link Mono} containing a {@link Response} whose value is the {@link DeletedCertificate deleted certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) {
    try {
        // Defer to the Context-aware overload; withContext supplies the reactor context.
        return withContext(context -> getDeletedCertificateWithResponse(name, context));
    } catch (RuntimeException ex) {
        // Surface synchronous failures through the returned Mono instead of throwing.
        return monoError(logger, ex);
    }
}

// Service-call implementation shared by the public overload; logs request, success and failure.
Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) {
    return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name))
        .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error));
}

/**
 * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge
 * Deleted Certificate operation is applicable for soft-delete enabled vaults and is not available if
 * the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge
 * permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the status
 * code from the server response when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse}
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return An empty {@link Mono}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> purgeDeletedCertificate(String name) {
    try {
        // Discard the Response wrapper; callers only observe completion.
        return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge
 * Deleted Certificate operation is applicable for soft-delete enabled vaults and is not available if
 * the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge
 * permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the status
 * code from the server response when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse}
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link Mono} containing a Void {@link Response}
updated
/**
 * Issues the create-certificate request to the Key Vault service.
 * Builds the request body from the supplied policy, enabled flag and tags, then delegates to the
 * generated service proxy. Performs no I/O until the returned Mono is subscribed.
 *
 * @param name name of the certificate to create.
 * @param certificatePolicy policy governing the certificate's issuance and lifetime.
 * @param enabled whether the new certificate version starts enabled.
 * @param tags application-specific metadata to attach; may be null.
 * @param context additional context propagated through the HTTP pipeline.
 * @return a {@link Mono} emitting the service response with the pending {@link CertificateOperation}.
 */
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) {
    // Assemble the request pieces in named locals rather than one chained expression.
    CertificatePolicyRequest policyRequest = new CertificatePolicyRequest(certificatePolicy);
    CertificateRequestAttributes attributes = new CertificateRequestAttributes().enabled(enabled);
    CertificateRequestParameters parameters = new CertificateRequestParameters()
        .certificatePolicy(policyRequest)
        .certificateAttributes(attributes)
        .tags(tags);
    return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context);
}
return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context);
new CertificateRequestParameters() .certificatePolicy(new CertificatePolicyRequest(certificatePolicy)) .certificateAttributes(new CertificateRequestAttributes().enabled(enabled)) .tags(tags); return service.createCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, certificateRequestParameters, CONTENT_TYPE_HEADER_VALUE, context); } /** * Gets a pending {@link CertificateOperation}
class CertificateAsyncClient {
    // Fixed service API version and request headers sent with every call.
    static final String API_VERSION = "7.0";
    static final String ACCEPT_LANGUAGE = "en-US";
    static final int DEFAULT_MAX_PAGE_RESULTS = 25;
    static final String CONTENT_TYPE_HEADER_VALUE = "application/json";

    // Vault endpoint, kept as a string for URL templating in service calls.
    private final String vaultUrl;
    // Auto-generated REST proxy over the Key Vault certificate endpoints.
    private final CertificateService service;
    private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class);

    /**
     * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests.
     *
     * @param vaultUrl URL for the Azure KeyVault service.
     * @param pipeline HttpPipeline that the HTTP requests and responses flow through.
     * @param version {@link CertificateServiceVersion} of the service to be used when making requests.
     */
    // NOTE(review): the {@code version} parameter is accepted but never used here — confirm
    // whether it should select the API version instead of the hard-coded API_VERSION constant.
    CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) {
        // Fail fast with the library's canonical error string when the endpoint is absent.
        Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED));
        this.vaultUrl = vaultUrl.toString();
        this.service = RestProxy.create(CertificateService.class, pipeline);
    }

    /**
     * Get the vault endpoint url to which service requests are sent to.
     * @return the vault endpoint url
     */
    public String getVaultUrl() {
        return vaultUrl;
    }

    /**
     * Creates a new certificate. If this is the first version, the certificate resource is created.
     * This operation requires the certificates/create permission.
     *
     * <p><strong>Code Samples</strong></p>
     * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users
     * to automatically poll on the create certificate operation status. It is possible to monitor
     * each intermediate poll response during the poll operation.</p>
     *
     * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate
     *
     * @param name The name of the certificate to be created.
     * @param policy The policy of the certificate to be created.
     * @param enabled The enabled status for the certificate.
     * @param tags The application specific metadata to set.
 * @throws ResourceModifiedException when invalid certificate policy configuration is provided.
 * @return A {@link PollerFlux} polling on the create certificate operation status.
 */
public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
    // Poll every second; activation issues the create call, the remaining callbacks poll,
    // cancel, and fetch the final certificate respectively.
    return new PollerFlux<>(Duration.ofSeconds(1),
        activationOperation(name, policy, enabled, tags),
        createPollOperation(name),
        cancelOperation(name),
        fetchResultOperation(name));
}

// Cancellation callback for the poller: cancels the server-side certificate operation
// and surfaces the (cancelled) operation back to the poller.
private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) {
    return (pollingContext, firstResponse) -> withContext(context ->
        cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono);
}

// Activation callback: kicks off certificate creation and emits the pending operation.
private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) {
    return (pollingContext) -> withContext(context ->
        createCertificateWithResponse(name, policy, enabled, tags, context)
            .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue())));
}

// Final-result callback: fetches the latest version ("" = latest) of the created certificate.
private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) {
    return (pollingContext) -> withContext(context ->
        getCertificateWithResponse(name, "", context)
            .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue())));
}

/**
 * Creates a new certificate. If this is the first version, the certificate resource is created.
 * This operation requires the certificates/create permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users
 * to automatically poll on the create certificate operation status.
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.createCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean 
enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Geta a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), null, createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. 
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) {
    try {
        // "" requests the latest version of the certificate.
        return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets information about the latest version of the specified certificate. This operation requires
 * the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the returned
 * certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value}
 *     contains the requested certificate with policy.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) {
    try {
        return withContext(context -> getCertificateWithResponse(name, "", context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Fetches the certificate together with its policy ("" version = latest).
Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) {
    return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
        .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
}

// Fetches a specific version of the certificate WITHOUT its policy (different service call
// than getCertificateWithResponse above).
Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) {
    return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name))
        .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error));
}

/**
 * Gets information about the latest version of the specified certificate. This operation requires
 * the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault. Prints out the returned
 * certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value}
 *     contains the requested certificate version.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) {
    try {
        // Normalize a null version to "" (latest) before delegating.
        return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Gets information about the specified version of the specified certificate. This operation
 * requires the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets a specific version of the certificate in the key vault.
Prints out the
 * returned certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion
 *
 * @param name The name of the certificate to retrieve, cannot be null
 * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@code name} is empty string.
 * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) {
    try {
        // Normalize a null version to "" (latest), then unwrap the Response to its value.
        return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Updates the specified attributes associated with the specified certificate. The update operation
 * changes specified attributes of an existing stored certificate and attributes that are not
 * specified in the request are left unchanged. This operation requires the certificates/update
 * permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates
 * it in the Azure Key Vault. Prints out the returned certificate details when a response has been
 * received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties
 *
 * @param certificateProperties The {@link CertificateProperties} object with updated properties.
 * @throws NullPointerException if {@code certificate} is {@code null}.
 * @throws ResourceNotFoundException when a certificate with {@link CertificateProperties#getName() name} doesn't exist in the key vault.
 * @throws HttpRequestException if {@link CertificateProperties#getName() name} is an empty string.
 * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) {
    try {
        // Delegate to the Response-returning variant and unwrap its value.
        return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Updates the specified attributes associated with the specified certificate. The update operation
 * changes specified attributes of an existing stored certificate and attributes that are not
 * specified in the request are left unchanged. This operation requires the certificates/update
 * permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the
 * Azure Key Vault. Prints out the returned certificate details when a response has been
 * received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse
 *
 * @param certificateProperties The {@link CertificateProperties} object with updated properties.
 * @throws NullPointerException if {@code certificate} is {@code null}.
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. 
This operation requires the certificates/delete permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details
 * when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate
 *
 * @param name The name of the certificate to be deleted.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) {
    // Cancel and final-fetch are no-ops: delete cannot be cancelled and yields no final result.
    return new PollerFlux<>(Duration.ofSeconds(1),
        activationOperation(name),
        createDeletePollOperation(name),
        (context, firstResponse) -> Mono.empty(),
        (context) -> Mono.empty());
}

// Activation callback: issues the delete request and emits the resulting DeletedCertificate.
private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) {
    return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context)
        .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue())));
}

/* Polling operation to poll on create delete certificate operation status.
 */
private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) {
    return pollingContext ->
        withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
            .flatMap(deletedCertificateResponse -> {
                // 404 means the server hasn't finished transitioning the certificate into the
                // deleted state yet — keep polling with the last known value.
                if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) {
                    return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue())));
                }
                return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue())));
            }))
        // NOTE(review): ANY error is mapped to SUCCESSFULLY_COMPLETED here — presumably to
        // terminate polling on vaults without soft-delete; confirm this is intentional, as it
        // also masks transient failures.
        .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue()));
}

// Issues the delete-certificate request and logs the outcome.
Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) {
    return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name))
        .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to delete the certificate - {}", name, error));
}

/**
 * Retrieves information about the specified deleted certificate. The GetDeletedCertificate
 * operation is applicable for soft-delete enabled vaults and additionally retrieves deleted
 * certificate's attributes, such as retention interval, scheduled permanent deletion and the
 * current deletion recovery level. This operation requires the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p> Gets the deleted certificate from the key vault enabled for soft-delete.
Prints out the
 * deleted certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<DeletedCertificate> getDeletedCertificate(String name) {
    try {
        // Delegate to the Response-returning variant and unwrap its value.
        return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Retrieves information about the specified deleted certificate. The GetDeletedCertificate
 * operation is applicable for soft-delete enabled vaults and additionally retrieves deleted
 * certificate's attributes, such as retention interval, scheduled permanent deletion and the
 * current deletion recovery level. This operation requires the certificates/get permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the
 * deleted certificate details when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link Mono} containing a {@link Response} whose {@link Response#getValue() value}
 *     contains the deleted certificate.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) {
    try {
        return withContext(context -> getDeletedCertificateWithResponse(name, context));
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

// Fetches a soft-deleted certificate and logs the outcome.
Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) {
    return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context)
        .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name))
        .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName()))
        .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error));
}

/**
 * Permanently deletes the specified deleted certificate without possibility for recovery. The
 * Purge Deleted Certificate operation is applicable for soft-delete enabled vaults and is not
 * available if the recovery level does not specify 'Purgeable'. This operation requires the
 * certificate/purge permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the
 * status code from the server response when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return An empty {@link Mono}.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Mono<Void> purgeDeletedCertificate(String name) {
    try {
        // Delegate to the Response-returning variant and discard the response wrapper.
        return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono);
    } catch (RuntimeException ex) {
        return monoError(logger, ex);
    }
}

/**
 * Permanently deletes the specified deleted certificate without possibility for recovery. The
 * Purge Deleted Certificate operation is applicable for soft-delete enabled vaults and is not
 * available if the recovery level does not specify 'Purgeable'. This operation requires the
 * certificate/purge permission.
 *
 * <p><strong>Code Samples</strong></p>
 * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the
 * status code from the server response when a response has been received.</p>
 *
 * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse
 *
 * @param name The name of the deleted certificate.
 * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault.
 * @throws HttpRequestException when a certificate with {@code name} is empty string.
 * @return A {@link Mono} containing a Void Response}
class CertificateAsyncClient { static final String API_VERSION = "7.0"; static final String ACCEPT_LANGUAGE = "en-US"; static final int DEFAULT_MAX_PAGE_RESULTS = 25; static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; static final String KEY_VAULT_SCOPE = "https: private final String vaultUrl; private final CertificateService service; private final ClientLogger logger = new ClientLogger(CertificateAsyncClient.class); /** * Creates a CertificateAsyncClient that uses {@code pipeline} to service requests * * @param vaultUrl URL for the Azure KeyVault service. * @param pipeline HttpPipeline that the HTTP requests and responses flow through. * @param version {@link CertificateServiceVersion} of the service to be used when making requests. */ CertificateAsyncClient(URL vaultUrl, HttpPipeline pipeline, CertificateServiceVersion version) { Objects.requireNonNull(vaultUrl, KeyVaultErrorCodeStrings.getErrorString(KeyVaultErrorCodeStrings.VAULT_END_POINT_REQUIRED)); this.vaultUrl = vaultUrl.toString(); this.service = RestProxy.create(CertificateService.class, pipeline); } /** * Get the vault endpoint url to which service requests are sent to. * @return the vault endpoint url */ public String getVaultUrl() { return vaultUrl; } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @param enabled The enabled status for the certificate. 
* @param tags The application specific metadata to set. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name, policy, enabled, tags), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } private BiFunction<PollingContext<CertificateOperation>, PollResponse<CertificateOperation>, Mono<CertificateOperation>> cancelOperation(String name) { return (pollingContext, firstResponse) -> withContext(context -> cancelCertificateOperationWithResponse(name, context)).flatMap(FluxUtil::toMono); } private Function<PollingContext<CertificateOperation>, Mono<CertificateOperation>> activationOperation(String name, CertificatePolicy policy, boolean enabled, Map<String, String> tags) { return (pollingContext) -> withContext(context -> createCertificateWithResponse(name, policy, enabled, tags, context) .flatMap(certificateOperationResponse -> Mono.just(certificateOperationResponse.getValue()))); } private Function<PollingContext<CertificateOperation>, Mono<KeyVaultCertificate>> fetchResultOperation(String name) { return (pollingContext) -> withContext(context -> getCertificateWithResponse(name, "", context) .flatMap(certificateResponse -> Mono.just(certificateResponse.getValue()))); } /** * Creates a new certificate. If this is the first version, the certificate resource is created. This operation requires * the certificates/create permission. * * <p><strong>Code Samples</strong></p> * <p>Create certificate is a long running operation. The {@link PollerFlux poller} allows users to automatically poll on the create certificate * operation status. 
It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginCreateCertificate * * @param name The name of the certificate to be created. * @param policy The policy of the certificate to be created. * @throws ResourceModifiedException when invalid certificate policy configuration is provided. * @return A {@link PollerFlux} polling on the create certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> beginCreateCertificate(String name, CertificatePolicy policy) { return beginCreateCertificate(name, policy, true, null); } /* Polling operation to poll on create certificate operation status. */ private Function<PollingContext<CertificateOperation>, Mono<PollResponse<CertificateOperation>>> createPollOperation(String certificateName) { return (pollingContext) -> { try { return withContext(context -> service.getCertificateOperation(vaultUrl, certificateName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(this::processCertificateOperationResponse)); } catch (HttpRequestException e) { logger.logExceptionAsError(e); return Mono.just(new PollResponse<>(LongRunningOperationStatus.FAILED, null)); } }; } private Mono<PollResponse<CertificateOperation>> processCertificateOperationResponse(Response<CertificateOperation> certificateOperationResponse) { LongRunningOperationStatus status = null; switch (certificateOperationResponse.getValue().getStatus()) { case "inProgress": status = LongRunningOperationStatus.IN_PROGRESS; break; case "completed": status = LongRunningOperationStatus.SUCCESSFULLY_COMPLETED; break; case "failed": status = LongRunningOperationStatus.FAILED; break; default: status = LongRunningOperationStatus.fromString(certificateOperationResponse.getValue().getStatus(), true); break; } return Mono.just(new PollResponse<>(status, certificateOperationResponse.getValue())); } 
Mono<Response<CertificateOperation>> createCertificateWithResponse(String name, CertificatePolicy certificatePolicy, boolean enabled, Map<String, String> tags, Context context) { CertificateRequestParameters certificateRequestParameters = from the key vault. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Get a pending certificate operation. The {@link PollerFlux poller} allows users to automatically poll on the certificate * operation status. It is possible to monitor each intermediate poll response during the poll operation.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateOperation * * @param name The name of the certificate. * @throws ResourceNotFoundException when a certificate operation for a certificate with {@code name} doesn't exist. * @return A {@link PollerFlux} polling on the certificate operation status. */ public PollerFlux<CertificateOperation, KeyVaultCertificate> getCertificateOperation(String name) { return new PollerFlux<>(Duration.ofSeconds(1), (pollingContext) -> Mono.empty(), createPollOperation(name), cancelOperation(name), fetchResultOperation(name)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificate * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificateWithPolicy certificate}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificateWithPolicy> getCertificate(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name) { try { return withContext(context -> getCertificateWithResponse(name, "", context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificateWithPolicy>> getCertificateWithResponse(String name, String version, Context context) { return service.getCertificateWithPolicy(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version, Context context) { 
return service.getCertificate(vaultUrl, name, version, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the certificate - {}", name, error)); } /** * Gets information about the latest version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersionWithResponse * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> getCertificateVersionWithResponse(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Gets information about the specified version of the specified certificate. This operation requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p>Gets a specific version of the certificate in the key vault. 
Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getCertificateVersion * * @param name The name of the certificate to retrieve, cannot be null * @param version The version of the certificate to retrieve. If this is an empty String or null then latest version of the certificate is retrieved. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException if {@code name} is empty string. * @return A {@link Mono} containing the requested {@link KeyVaultCertificate certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> getCertificateVersion(String name, String version) { try { return withContext(context -> getCertificateVersionWithResponse(name, version == null ? "" : version, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its tags and enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificateProperties * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing the {@link CertificateProperties updated certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<KeyVaultCertificate> updateCertificateProperties(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Updates the specified attributes associated with the specified certificate. The update operation changes specified attributes of an existing * stored certificate and attributes that are not specified in the request are left unchanged. This operation requires the certificates/update permission. * * <p><strong>Code Samples</strong></p> * <p>Gets latest version of the certificate, changes its enabled status and then updates it in the Azure Key Vault. Prints out the * returned certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.updateCertificatePropertiesWithResponse * * @param certificateProperties The {@link CertificateProperties} object with updated properties. * @throws NullPointerException if {@code certificate} is {@code null}. 
* @throws ResourceNotFoundException when a certificate with {@link CertificateProperties * @throws HttpRequestException if {@link CertificateProperties * @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties) { try { return withContext(context -> updateCertificatePropertiesWithResponse(certificateProperties, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<KeyVaultCertificate>> updateCertificatePropertiesWithResponse(CertificateProperties certificateProperties, Context context) { Objects.requireNonNull(certificateProperties, "certificateProperties' cannot be null."); CertificateUpdateParameters parameters = new CertificateUpdateParameters() .tags(certificateProperties.getTags()) .certificateAttributes(new CertificateRequestAttributes(certificateProperties)); return service.updateCertificate(vaultUrl, certificateProperties.getName(), API_VERSION, ACCEPT_LANGUAGE, parameters, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Updating certificate - {}", certificateProperties.getName())) .doOnSuccess(response -> logger.info("Updated the certificate - {}", certificateProperties.getName())) .doOnError(error -> logger.warning("Failed to update the certificate - {}", certificateProperties.getName(), error)); } /** * Deletes a certificate from a specified key vault. All the versions of the certificate along with its associated policy * get deleted. If soft-delete is enabled on the key vault then the certificate is placed in the deleted state and requires to be * purged for permanent deletion else the certificate is permanently deleted. The delete operation applies to any certificate stored in * Azure Key Vault but it cannot be applied to an individual version of a certificate. 
This operation requires the certificates/delete permission. * * <p><strong>Code Samples</strong></p> * <p>Deletes the certificate in the Azure Key Vault. Prints out the deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.beginDeleteCertificate * * @param name The name of the certificate to be deleted. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link PollerFlux} to poll on the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public PollerFlux<DeletedCertificate, Void> beginDeleteCertificate(String name) { return new PollerFlux<>(Duration.ofSeconds(1), activationOperation(name), createDeletePollOperation(name), (context, firstResponse) -> Mono.empty(), (context) -> Mono.empty()); } private Function<PollingContext<DeletedCertificate>, Mono<DeletedCertificate>> activationOperation(String name) { return (pollingContext) -> withContext(context -> deleteCertificateWithResponse(name, context) .flatMap(deletedCertificateResponse -> Mono.just(deletedCertificateResponse.getValue()))); } /* Polling operation to poll on create delete certificate operation status. 
*/ private Function<PollingContext<DeletedCertificate>, Mono<PollResponse<DeletedCertificate>>> createDeletePollOperation(String keyName) { return pollingContext -> withContext(context -> service.getDeletedCertificatePoller(vaultUrl, keyName, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .flatMap(deletedCertificateResponse -> { if (deletedCertificateResponse.getStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.IN_PROGRESS, pollingContext.getLatestResponse().getValue()))); } return Mono.defer(() -> Mono.just(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, deletedCertificateResponse.getValue()))); })) .onErrorReturn(new PollResponse<>(LongRunningOperationStatus.SUCCESSFULLY_COMPLETED, pollingContext.getLatestResponse().getValue())); } Mono<Response<DeletedCertificate>> deleteCertificateWithResponse(String name, Context context) { return service.deleteCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Deleting certificate - {}", name)) .doOnSuccess(response -> logger.info("Deleted the certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to delete the certificate - {}", name, error)); } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. 
Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificate * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing the {@link DeletedCertificate deleted certificate}. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<DeletedCertificate> getDeletedCertificate(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Retrieves information about the specified deleted certificate. The GetDeletedCertificate operation is applicable for soft-delete * enabled vaults and additionally retrieves deleted certificate's attributes, such as retention interval, scheduled permanent deletion and the current deletion recovery level. This operation * requires the certificates/get permission. * * <p><strong>Code Samples</strong></p> * <p> Gets the deleted certificate from the key vault enabled for soft-delete. Prints out the * deleted certificate details when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.getDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. 
* @return A {@link Mono} containing a {@link Response} whose {@link Response */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name) { try { return withContext(context -> getDeletedCertificateWithResponse(name, context)); } catch (RuntimeException ex) { return monoError(logger, ex); } } Mono<Response<DeletedCertificate>> getDeletedCertificateWithResponse(String name, Context context) { return service.getDeletedCertificate(vaultUrl, name, API_VERSION, ACCEPT_LANGUAGE, CONTENT_TYPE_HEADER_VALUE, context) .doOnRequest(ignored -> logger.info("Retrieving deleted certificate - {}", name)) .doOnSuccess(response -> logger.info("Retrieved the deleted certificate - {}", response.getValue().getProperties().getName())) .doOnError(error -> logger.warning("Failed to Retrieve the deleted certificate - {}", name, error)); } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return An empty {@link Mono}. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<Void> purgeDeletedCertificate(String name) { try { return purgeDeletedCertificateWithResponse(name).flatMap(FluxUtil::toMono); } catch (RuntimeException ex) { return monoError(logger, ex); } } /** * Permanently deletes the specified deleted certificate without possibility for recovery. The Purge Deleted Certificate operation is applicable for * soft-delete enabled vaults and is not available if the recovery level does not specify 'Purgeable'. This operation requires the certificate/purge permission. * * <p><strong>Code Samples</strong></p> * <p>Purges the deleted certificate from the key vault enabled for soft-delete. Prints out the * status code from the server response when a response has been received.</p> * * {@codesnippet com.azure.security.keyvault.certificates.CertificateAsyncClient.purgeDeletedCertificateWithResponse * * @param name The name of the deleted certificate. * @throws ResourceNotFoundException when a certificate with {@code name} doesn't exist in the key vault. * @throws HttpRequestException when a certificate with {@code name} is empty string. * @return A {@link Mono} containing a Void Response}
What if message already has diagnostic-id? We should not create message span or stamp context on the message
private EventData setSpanContext(EventData event, Context parentContext) { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return event; }
event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
private EventData setSpanContext(EventData event, Context parentContext) { Optional<Object> eventContextData = event.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return event; } else { Context eventSpanContext = tracerProvider.startSpan(parentContext, ProcessKind.MESSAGE); if (eventSpanContext != null) { Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { event.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); event.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } } return event; }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final BatchOptions DEFAULT_BATCH_OPTIONS = new BatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final RetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link BatchOptions * balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, RetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ @ServiceMethod(returns = ReturnType.COLLECTION) public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromArray(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. 
*/ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @param options A set of options used to configure the {@link EventDataBatch}. * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch(BatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final BatchOptions clone = options.clone(); if (!CoreUtils.isNullOrEmpty(clone.getPartitionKey()) && !CoreUtils.isNullOrEmpty(clone.getPartitionId())) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId()))); } else if (!CoreUtils.isNullOrEmpty(clone.getPartitionKey()) && clone.getPartitionKey().length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", clone.getPartitionKey(), MAX_PARTITION_KEY_LENGTH))); } return getSendLink(clone.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.getMaximumSizeInBytes() > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.getMaximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.getMaximumSizeInBytes() > 0 ? 
clone.getMaximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.getPartitionId(), clone.getPartitionKey(), link::getErrorContext)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. 
* * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. 
* * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning("Cannot send an EventBatch that is empty."); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getSize(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getSize(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getSize()); } final String partitionKey = batch.getPartitionKey(); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final SendOptions clone = options.clone(); final boolean isTracingEnabled = tracerProvider.isEnabled(); if (!CoreUtils.isNullOrEmpty(clone.getPartitionKey()) && !CoreUtils.isNullOrEmpty(clone.getPartitionId())) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId()))); } return getSendLink(options.getPartitionId()) .flatMap(link -> { final AtomicReference<Context> sendSpanContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final BatchOptions batchOptions = new BatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); final AtomicBoolean isFirst = new AtomicBoolean(true); return events.map(eventData -> { if (!isTracingEnabled) { return eventData; } final Context parentContext = eventData.getContext(); if (isFirst.getAndSet(false)) { Context entityContext = parentContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(batchOptions, 1, link::getErrorContext)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(sendSpanContext.get(), signal); } }); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient} by closing the underlying connection to the service. 
*/ @Override public void close() { if (!isDisposed.getAndSet(true)) { openLinks.forEach((key, value) -> { try { value.close(); } catch (IOException e) { logger.warning("Error closing link for partition: {}", key, e); } }); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(BatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. 
'%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
class EventHubProducerAsyncClient implements Closeable { private static final int MAX_PARTITION_KEY_LENGTH = 128; private static final String SENDER_ENTITY_PATH_FORMAT = "%s/Partitions/%s"; private static final SendOptions DEFAULT_SEND_OPTIONS = new SendOptions(); private static final CreateBatchOptions DEFAULT_BATCH_OPTIONS = new CreateBatchOptions(); /** * Keeps track of the opened send links. Links are key'd by their entityPath. The send link for allowing the service * load balance messages is the eventHubName. */ private final ConcurrentHashMap<String, AmqpSendLink> openLinks = new ConcurrentHashMap<>(); private final ClientLogger logger = new ClientLogger(EventHubProducerAsyncClient.class); private final AtomicBoolean isDisposed = new AtomicBoolean(); private final String fullyQualifiedNamespace; private final String eventHubName; private final EventHubConnection connection; private final RetryOptions retryOptions; private final TracerProvider tracerProvider; private final MessageSerializer messageSerializer; private final boolean isSharedConnection; /** * Creates a new instance of this {@link EventHubProducerAsyncClient} that can send messages to a single partition * when {@link CreateBatchOptions * load balance the messages amongst available partitions. */ EventHubProducerAsyncClient(String fullyQualifiedNamespace, String eventHubName, EventHubConnection connection, RetryOptions retryOptions, TracerProvider tracerProvider, MessageSerializer messageSerializer, boolean isSharedConnection) { this.fullyQualifiedNamespace = fullyQualifiedNamespace; this.eventHubName = eventHubName; this.connection = connection; this.retryOptions = retryOptions; this.tracerProvider = tracerProvider; this.messageSerializer = messageSerializer; this.isSharedConnection = isSharedConnection; } /** * Gets the fully qualified Event Hubs namespace that the connection is associated with. This is likely similar to * {@code {yournamespace}.servicebus.windows.net}. 
* * @return The fully qualified Event Hubs namespace that the connection is associated with */ public String getFullyQualifiedNamespace() { return fullyQualifiedNamespace; } /** * Gets the Event Hub name this client interacts with. * * @return The Event Hub name this client interacts with. */ public String getEventHubName() { return eventHubName; } /** * Retrieves information about an Event Hub, including the number of partitions present and their identifiers. * * @return The set of information for the Event Hub that this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<EventHubProperties> getProperties() { return connection.getManagementNode().flatMap(EventHubManagementNode::getEventHubProperties); } /** * Retrieves the identifiers for the partitions of an Event Hub. * * @return A Flux of identifiers for the partitions of an Event Hub. */ public Flux<String> getPartitionIds() { return getProperties().flatMapMany(properties -> Flux.fromIterable(properties.getPartitionIds())); } /** * Retrieves information about a specific partition for an Event Hub, including elements that describe the available * events in the partition event stream. * * @param partitionId The unique identifier of a partition associated with the Event Hub. * @return The set of information for the requested partition under the Event Hub this client is associated with. */ @ServiceMethod(returns = ReturnType.SINGLE) public Mono<PartitionProperties> getPartitionProperties(String partitionId) { return connection.getManagementNode().flatMap(node -> node.getPartitionProperties(partitionId)); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch() { return createBatch(DEFAULT_BATCH_OPTIONS); } /** * Creates an {@link EventDataBatch} that can fit as many events as the transport allows. 
* * @param options A set of options used to configure the {@link EventDataBatch}. * * @return A new {@link EventDataBatch} that can fit as many events as the transport allows. */ public Mono<EventDataBatch> createBatch(CreateBatchOptions options) { if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } final CreateBatchOptions clone = options.clone(); if (!CoreUtils.isNullOrEmpty(clone.getPartitionKey()) && !CoreUtils.isNullOrEmpty(clone.getPartitionId())) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId()))); } else if (!CoreUtils.isNullOrEmpty(clone.getPartitionKey()) && clone.getPartitionKey().length() > MAX_PARTITION_KEY_LENGTH) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "PartitionKey '%s' exceeds the maximum allowed length: '%s'.", clone.getPartitionKey(), MAX_PARTITION_KEY_LENGTH))); } return getSendLink(clone.getPartitionId()) .flatMap(link -> link.getLinkSize() .flatMap(size -> { final int maximumLinkSize = size > 0 ? size : MAX_MESSAGE_LENGTH_BYTES; if (clone.getMaximumSizeInBytes() > maximumLinkSize) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.maximumSizeInBytes (%s bytes) is larger than the link size (%s bytes).", clone.getMaximumSizeInBytes(), maximumLinkSize))); } final int batchSize = clone.getMaximumSizeInBytes() > 0 ? clone.getMaximumSizeInBytes() : maximumLinkSize; return Mono.just(new EventDataBatch(batchSize, clone.getPartitionId(), clone.getPartitionKey(), link::getErrorContext)); })); } /** * Sends a single event to the associated Event Hub. If the size of the single event exceeds the maximum size * allowed, an exception will be triggered and the send will fail. 
* * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } return send(Flux.just(event)); } /** * Sends a single event to the associated Event Hub with the send options. If the size of the single event exceeds * the maximum size allowed, an exception will be triggered and the send will fail. * * <p> * For more information regarding the maximum event size allowed, see * <a href="https: * Limits</a>. * </p> * * @param event Event to send to the service. * @param options The set of options to consider when sending this event. * * @return A {@link Mono} that completes when the event is pushed to the service. */ Mono<Void> send(EventData event, SendOptions options) { if (event == null) { return monoError(logger, new NullPointerException("'event' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.just(event), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(Flux.fromIterable(events)); } /** * Sends a set of events to the associated Event Hub using a batched approach. 
If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Iterable<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return send(Flux.fromIterable(events), options); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * * @return A {@link Mono} that completes when all events are pushed to the service. */ Mono<Void> send(Flux<EventData> events) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } return send(events, DEFAULT_SEND_OPTIONS); } /** * Sends a set of events to the associated Event Hub using a batched approach. If the size of events exceed the * maximum size of a single batch, an exception will be triggered and the send will fail. By default, the message * size is the max amount allowed on the link. * * @param events Events to send to the service. * @param options The set of options to consider when sending this batch. * * @return A {@link Mono} that completes when all events are pushed to the service. 
*/ Mono<Void> send(Flux<EventData> events, SendOptions options) { if (events == null) { return monoError(logger, new NullPointerException("'events' cannot be null.")); } else if (options == null) { return monoError(logger, new NullPointerException("'options' cannot be null.")); } return sendInternal(events, options); } /** * Sends the batch to the associated Event Hub. * * @param batch The batch to send to the service. * * @return A {@link Mono} that completes when the batch is pushed to the service. * * @throws NullPointerException if {@code batch} is {@code null}. * @see EventHubProducerAsyncClient * @see EventHubProducerAsyncClient */ public Mono<Void> send(EventDataBatch batch) { if (batch == null) { return monoError(logger, new NullPointerException("'batch' cannot be null.")); } else if (batch.getEvents().isEmpty()) { logger.warning("Cannot send an EventBatch that is empty."); return Mono.empty(); } if (!CoreUtils.isNullOrEmpty(batch.getPartitionId())) { logger.info("Sending batch with size[{}] to partitionId[{}].", batch.getSize(), batch.getPartitionId()); } else if (!CoreUtils.isNullOrEmpty(batch.getPartitionKey())) { logger.info("Sending batch with size[{}] with partitionKey[{}].", batch.getSize(), batch.getPartitionKey()); } else { logger.info("Sending batch with size[{}] to be distributed round-robin in service.", batch.getSize()); } final String partitionKey = batch.getPartitionKey(); final List<Message> messages = batch.getEvents().stream().map(event -> { final Message message = messageSerializer.serialize(event); if (!CoreUtils.isNullOrEmpty(partitionKey)) { final MessageAnnotations messageAnnotations = message.getMessageAnnotations() == null ? 
new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } return message; }).collect(Collectors.toList()); return getSendLink(batch.getPartitionId()) .flatMap(link -> messages.size() == 1 ? link.send(messages.get(0)) : link.send(messages)); } private Mono<Void> sendInternal(Flux<EventData> events, SendOptions options) { final SendOptions clone = options.clone(); final boolean isTracingEnabled = tracerProvider.isEnabled(); if (!CoreUtils.isNullOrEmpty(clone.getPartitionKey()) && !CoreUtils.isNullOrEmpty(clone.getPartitionId())) { return monoError(logger, new IllegalArgumentException(String.format(Locale.US, "BatchOptions.getPartitionKey() and BatchOptions.getPartitionId() are both set. Only one or the" + " other can be used. partitionKey: '%s'. partitionId: '%s'", clone.getPartitionKey(), clone.getPartitionId()))); } return getSendLink(options.getPartitionId()) .flatMap(link -> { final AtomicReference<Context> sendSpanContext = isTracingEnabled ? new AtomicReference<>(Context.NONE) : null; return link.getLinkSize() .flatMap(size -> { final int batchSize = size > 0 ? 
size : MAX_MESSAGE_LENGTH_BYTES; final CreateBatchOptions createBatchOptions = new CreateBatchOptions() .setPartitionKey(options.getPartitionKey()) .setPartitionId(options.getPartitionId()) .setMaximumSizeInBytes(batchSize); final AtomicBoolean isFirst = new AtomicBoolean(true); return events.map(eventData -> { if (!isTracingEnabled) { return eventData; } final Context parentContext = eventData.getContext(); if (isFirst.getAndSet(false)) { Context entityContext = parentContext.addData(ENTITY_PATH_KEY, link.getEntityPath()); sendSpanContext.set(tracerProvider.startSpan( entityContext.addData(HOST_NAME_KEY, link.getHostname()), ProcessKind.SEND)); } return setSpanContext(eventData, parentContext); }).collect(new EventDataCollector(createBatchOptions, 1, link::getErrorContext)); }) .flatMap(list -> sendInternal(Flux.fromIterable(list))) .doOnEach(signal -> { if (isTracingEnabled) { tracerProvider.endSpan(sendSpanContext.get(), signal); } }); }); } private Mono<Void> sendInternal(Flux<EventDataBatch> eventBatches) { return eventBatches .flatMap(this::send) .then() .doOnError(error -> { logger.error("Error sending batch.", error); }); } private String getEntityPath(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? eventHubName : String.format(Locale.US, SENDER_ENTITY_PATH_FORMAT, eventHubName, partitionId); } private String getLinkName(String partitionId) { return CoreUtils.isNullOrEmpty(partitionId) ? 
StringUtil.getRandomString("EC") : StringUtil.getRandomString("PS"); } private Mono<AmqpSendLink> getSendLink(String partitionId) { final String entityPath = getEntityPath(partitionId); final AmqpSendLink openLink = openLinks.get(entityPath); if (openLink != null) { return Mono.just(openLink); } else { return connection.createSendLink(getLinkName(partitionId), entityPath, retryOptions) .map(link -> openLinks.computeIfAbsent(entityPath, unusedKey -> link)); } } /** * Disposes of the {@link EventHubProducerAsyncClient} by closing the underlying connection to the service. */ @Override public void close() { if (!isDisposed.getAndSet(true)) { openLinks.forEach((key, value) -> { try { value.close(); } catch (IOException e) { logger.warning("Error closing link for partition: {}", key, e); } }); openLinks.clear(); if (!isSharedConnection) { connection.close(); } } } /** * Collects EventData into EventDataBatch to send to Event Hubs. If {@code maxNumberOfBatches} is {@code null} then * it'll collect as many batches as possible. Otherwise, if there are more events than can fit into {@code * maxNumberOfBatches}, then the collector throws a {@link AmqpException} with {@link * ErrorCondition */ private static class EventDataCollector implements Collector<EventData, List<EventDataBatch>, List<EventDataBatch>> { private final String partitionKey; private final String partitionId; private final int maxMessageSize; private final Integer maxNumberOfBatches; private final ErrorContextProvider contextProvider; private volatile EventDataBatch currentBatch; EventDataCollector(CreateBatchOptions options, Integer maxNumberOfBatches, ErrorContextProvider contextProvider) { this.maxNumberOfBatches = maxNumberOfBatches; this.maxMessageSize = options.getMaximumSizeInBytes() > 0 ? 
options.getMaximumSizeInBytes() : MAX_MESSAGE_LENGTH_BYTES; this.partitionKey = options.getPartitionKey(); this.partitionId = options.getPartitionId(); this.contextProvider = contextProvider; currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); } @Override public Supplier<List<EventDataBatch>> supplier() { return ArrayList::new; } @Override public BiConsumer<List<EventDataBatch>, EventData> accumulator() { return (list, event) -> { EventDataBatch batch = currentBatch; if (batch.tryAdd(event)) { return; } if (maxNumberOfBatches != null && list.size() == maxNumberOfBatches) { final String message = String.format(Locale.US, "EventData does not fit into maximum number of batches. '%s'", maxNumberOfBatches); throw new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, message, contextProvider.getErrorContext()); } currentBatch = new EventDataBatch(maxMessageSize, partitionId, partitionKey, contextProvider); currentBatch.tryAdd(event); list.add(batch); }; } @Override public BinaryOperator<List<EventDataBatch>> combiner() { return (existing, another) -> { existing.addAll(another); return existing; }; } @Override public Function<List<EventDataBatch>, List<EventDataBatch>> finisher() { return list -> { EventDataBatch batch = currentBatch; currentBatch = null; if (batch != null) { list.add(batch); } return list; }; } @Override public Set<Characteristics> characteristics() { return Collections.emptySet(); } } }
Thinking about the arguments... it should be (Context context, String key, T defaultValue, Class<T> clazz).
private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(ENTITY_PATH_KEY, "", String.class, context))); span.putAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(HOST_NAME_KEY, "", String.class, context))); }
AttributeValue.stringAttributeValue(getOrDefault(HOST_NAME_KEY, "", String.class, context)));
private void addSpanRequestAttributes(Span span, Context context, String spanName) { Objects.requireNonNull(span, "'span' cannot be null."); span.putAttribute(COMPONENT, AttributeValue.stringAttributeValue(parseComponentValue(spanName))); span.putAttribute( MESSAGE_BUS_DESTINATION, AttributeValue.stringAttributeValue(getOrDefault(context, ENTITY_PATH_KEY, "", String.class))); span.putAttribute( PEER_ENDPOINT, AttributeValue.stringAttributeValue(getOrDefault(context, HOST_NAME_KEY, "", String.class))); }
class OpenCensusTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenCensusTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); SpanBuilder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(PARENT_SPAN_KEY, null, Span.class, context); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { 
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(PARENT_SPAN_KEY, null, Span.class, context); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(PARENT_SPAN_KEY, null, Span.class, context); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Span span = getOrDefault(PARENT_SPAN_KEY, null, Span.class, context); if (span == null) { logger.warning("Failed to find span to link it."); return; } final SpanContext spanContext = getOrDefault(SPAN_CONTEXT_KEY, null, SpanContext.class, context); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. 
* <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(SPAN_CONTEXT_KEY, null, SpanContext.class, context); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { SpanBuilder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and traceparent of the current span. 
*/ private Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && spanName.length() > 0) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the designated * {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@link SpanBuilder} to create and start a new {@link Span}. */ private SpanBuilder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(PARENT_SPAN_KEY, null, Span.class, context); String spanNameKey = getOrDefault(USER_SPAN_NAME_KEY, null, String.class, context); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); } /** * Returns the value of the specified key from the context. 
* * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Object key, T defaultValue, Class<T> clazz, Context context) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
class OpenCensusTracer implements com.azure.core.util.tracing.Tracer { private static final Tracer TRACER = Tracing.getTracer(); static final String COMPONENT = "component"; static final String MESSAGE_BUS_DESTINATION = "message_bus.destination"; static final String PEER_ENDPOINT = "peer.address"; private final ClientLogger logger = new ClientLogger(OpenCensusTracer.class); /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); SpanBuilder spanBuilder = getSpanBuilder(spanName, context); Span span = spanBuilder.startSpan(); return context.addData(PARENT_SPAN_KEY, span); } /** * {@inheritDoc} */ @Override public Context start(String spanName, Context context, ProcessKind processKind) { Objects.requireNonNull(spanName, "'spanName' cannot be null."); Objects.requireNonNull(context, "'context' cannot be null."); Objects.requireNonNull(processKind, "'processKind' cannot be null."); Span span; SpanBuilder spanBuilder; switch (processKind) { case SEND: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.CLIENT).startSpan(); if (span.getOptions().contains(Span.Options.RECORD_EVENTS)) { addSpanRequestAttributes(span, context, spanName); } return context.addData(PARENT_SPAN_KEY, span); case MESSAGE: spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.startSpan(); context = setContextData(span); return context.addData(PARENT_SPAN_KEY, span); case PROCESS: return startScopedSpan(spanName, context); default: return Context.NONE; } } /** * {@inheritDoc} */ @Override public void end(int responseCode, Throwable throwable, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { 
span.setStatus(HttpTraceUtil.parseResponseStatus(responseCode, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void setAttribute(String key, String value, Context context) { if (CoreUtils.isNullOrEmpty(value)) { logger.info("Failed to set span attribute since value is null or empty."); return; } final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span != null) { span.putAttribute(key, AttributeValue.stringAttributeValue(value)); } else { logger.warning("Failed to find span to add attribute."); } } /** * {@inheritDoc} */ @Override public Context setSpanName(String spanName, Context context) { return context.addData(USER_SPAN_NAME_KEY, spanName); } /** * {@inheritDoc} */ @Override public void end(String statusMessage, Throwable throwable, Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to end it."); return; } if (span.getOptions().contains(Options.RECORD_EVENTS)) { span.setStatus(AmqpTraceUtil.parseStatusMessage(statusMessage, throwable)); } span.end(); } /** * {@inheritDoc} */ @Override public void addLink(Context context) { final Span span = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); if (span == null) { logger.warning("Failed to find span to link it."); return; } final SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext == null) { logger.warning("Failed to find span context to link it."); return; } span.addLink(Link.fromSpanContext(spanContext, PARENT_LINKED_SPAN)); } /** * {@inheritDoc} */ @Override public Context extractContext(String diagnosticId, Context context) { return AmqpPropagationFormatUtil.extractContext(diagnosticId, context); } /** * Starts a new child {@link Span} with parent being the remote and uses the {@link Span} is in the current Context, * to return an object that represents that scope. 
* <p>The scope is exited when the returned object is closed.</p> * * @param spanName The name of the returned Span. * @param context The {@link Context} containing the {@link SpanContext}. * @return The returned {@link Span} and the scope in a {@link Context} object. */ private Context startScopedSpan(String spanName, Context context) { Objects.requireNonNull(context, "'context' cannot be null."); Span span; SpanContext spanContext = getOrDefault(context, SPAN_CONTEXT_KEY, null, SpanContext.class); if (spanContext != null) { span = startSpanWithRemoteParent(spanName, spanContext); } else { SpanBuilder spanBuilder = getSpanBuilder(spanName, context); span = spanBuilder.setSpanKind(Span.Kind.SERVER).startSpan(); } return context.addData(PARENT_SPAN_KEY, span).addData("scope", TRACER.withSpan(span)); } /** * Creates a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the remote and * designated by the {@link SpanContext}. * * @param spanName The name of the returned Span. * @param spanContext The remote parent context of the returned Span. * @return A {@link Span} with parent being the remote {@link Span} designated by the {@link SpanContext}. */ private Span startSpanWithRemoteParent(String spanName, SpanContext spanContext) { SpanBuilder spanBuilder = TRACER.spanBuilderWithRemoteParent(spanName, spanContext); spanBuilder.setSpanKind(Span.Kind.SERVER); return spanBuilder.startSpan(); } /** * Extracts the {@link SpanContext trace identifiers} and the {@link SpanContext} of the current tracing span as * text and returns in a {@link Context} object. * * @param span The current tracing span. * @return The {@link Context} containing the {@link SpanContext} and traceparent of the current span. 
*/ private Context setContextData(Span span) { SpanContext spanContext = span.getContext(); final String traceparent = AmqpPropagationFormatUtil.getDiagnosticId(spanContext); return new Context(DIAGNOSTIC_ID_KEY, traceparent).addData(SPAN_CONTEXT_KEY, spanContext); } /** * Extracts request attributes from the given {@code context} and adds it to the started span. * * @param span The span to which request attributes are to be added. * @param context The context containing the request attributes. * @param spanName The name of the returned Span containing the component value. */ /** * Extracts the component name from the given span name. * * @param spanName The spanName containing the component name. * @return The component name contained in the context. */ private static String parseComponentValue(String spanName) { if (spanName != null && spanName.length() > 0) { int componentNameStartIndex = spanName.indexOf("."); int componentNameEndIndex = spanName.lastIndexOf("."); if (componentNameStartIndex != -1 && componentNameEndIndex != -1) { return spanName.substring(componentNameStartIndex + 1, componentNameEndIndex); } } return ""; } /** * Returns a {@link SpanBuilder} to create and start a new child {@link Span} with parent being the designated * {@code Span}. * * @param spanName The name of the returned Span. * @param context The context containing the span and the span name. * @return A {@link SpanBuilder} to create and start a new {@link Span}. */ private SpanBuilder getSpanBuilder(String spanName, Context context) { Span parentSpan = getOrDefault(context, PARENT_SPAN_KEY, null, Span.class); String spanNameKey = getOrDefault(context, USER_SPAN_NAME_KEY, null, String.class); if (spanNameKey == null) { spanNameKey = spanName; } if (parentSpan == null) { parentSpan = TRACER.getCurrentSpan(); } return TRACER.spanBuilderWithExplicitParent(spanNameKey, parentSpan); } /** * Returns the value of the specified key from the context. 
* * @param key The name of the attribute that needs to be extracted from the {@code Context}. * @param defaultValue the value to return in data not found. * @param clazz clazz the type of raw class to find data for. * @param context The context containing the specified key. * * @return The T type of raw class object */ @SuppressWarnings("unchecked") private <T> T getOrDefault(Context context, String key, T defaultValue, Class<T> clazz) { final Optional<Object> optional = context.getData(key); final Object result = optional.filter(value -> clazz.isAssignableFrom(value.getClass())).orElseGet(() -> { logger.warning("Could not extract key '{}' of type '{}' from context.", key, clazz); return defaultValue; }); return (T) result; } }
I'm not strong on this change.
public static void validateCpk(CpkInfo customerProvidedKey, String endpoint) { if (customerProvidedKey != null && !BlobUrlParts.parse(endpoint).getScheme().equals(Constants.HTTPS)) { throw new IllegalArgumentException("Using a customer provided key requires https"); } }
throw new IllegalArgumentException("Using a customer provided key requires https");
public static void validateCpk(CpkInfo customerProvidedKey, String endpoint) { if (customerProvidedKey != null && !BlobUrlParts.parse(endpoint).getScheme().equals(Constants.HTTPS)) { throw new IllegalArgumentException("Using a customer provided key requires https"); } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-blob"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param serviceVersion {@link BlobServiceVersion} of the service to be used when making requests. * @return A new {@link HttpPipeline} from the passed values. 
*/ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, BlobServiceVersion serviceVersion) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Blob. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); BlobHeadersAndQueryParameters.getBlobHeaders().forEach(defaultOptions::addAllowedHeaderName); BlobHeadersAndQueryParameters.getBlobQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /** * Gets the endpoint for the blob service based on the parsed URL. * * @param parts The {@link BlobUrlParts} from the parse URL. * @return The endpoint for the blob service. */ public static String getEndpoint(BlobUrlParts parts) { if (ModelHelper.IP_V4_URL_PATTERN.matcher(parts.getHost()).find()) { return String.format("%s: } else { return String.format("%s: } } /** * Validates that the client is properly configured for using cpk. * * @param customerProvidedKey The cpk object. * @param endpoint The endpoint for the client. */ /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration, BlobServiceVersion serviceVersion) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration, serviceVersion); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. 
*/ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build(); } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-blob"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param serviceVersion {@link BlobServiceVersion} of the service to be used when making requests. * @return A new {@link HttpPipeline} from the passed values. 
*/ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, BlobServiceVersion serviceVersion) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Blob. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); BlobHeadersAndQueryParameters.getBlobHeaders().forEach(defaultOptions::addAllowedHeaderName); BlobHeadersAndQueryParameters.getBlobQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /** * Gets the endpoint for the blob service based on the parsed URL. * * @param parts The {@link BlobUrlParts} from the parse URL. * @return The endpoint for the blob service. */ public static String getEndpoint(BlobUrlParts parts) { if (ModelHelper.IP_V4_URL_PATTERN.matcher(parts.getHost()).find()) { return String.format("%s: } else { return String.format("%s: } } /** * Validates that the client is properly configured for using cpk. * * @param customerProvidedKey The cpk object. * @param endpoint The endpoint for the client. */ /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration, BlobServiceVersion serviceVersion) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration, serviceVersion); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. 
*/ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build(); } }
Should this throw an `IllegalStateException` instead? This feels more like an invalid state than an invalid argument.
public static void validateCpk(CpkInfo customerProvidedKey, String endpoint) { if (customerProvidedKey != null && !BlobUrlParts.parse(endpoint).getScheme().equals(Constants.HTTPS)) { throw new IllegalArgumentException("Using a customer provided key requires https"); } }
throw new IllegalArgumentException("Using a customer provided key requires https");
public static void validateCpk(CpkInfo customerProvidedKey, String endpoint) { if (customerProvidedKey != null && !BlobUrlParts.parse(endpoint).getScheme().equals(Constants.HTTPS)) { throw new IllegalArgumentException("Using a customer provided key requires https"); } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-blob"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param serviceVersion {@link BlobServiceVersion} of the service to be used when making requests. * @return A new {@link HttpPipeline} from the passed values. 
*/ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, BlobServiceVersion serviceVersion) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Blob. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); BlobHeadersAndQueryParameters.getBlobHeaders().forEach(defaultOptions::addAllowedHeaderName); BlobHeadersAndQueryParameters.getBlobQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /** * Gets the endpoint for the blob service based on the parsed URL. * * @param parts The {@link BlobUrlParts} from the parse URL. * @return The endpoint for the blob service. */ public static String getEndpoint(BlobUrlParts parts) { if (ModelHelper.IP_V4_URL_PATTERN.matcher(parts.getHost()).find()) { return String.format("%s: } else { return String.format("%s: } } /** * Validates that the client is properly configured for using cpk. * * @param customerProvidedKey The cpk object. * @param endpoint The endpoint for the client. */ /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration, BlobServiceVersion serviceVersion) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration, serviceVersion); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. 
*/ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build(); } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-blob"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param serviceVersion {@link BlobServiceVersion} of the service to be used when making requests. * @return A new {@link HttpPipeline} from the passed values. 
*/ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, BlobServiceVersion serviceVersion) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Blob. * * @return the default http log options. 
*/ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); BlobHeadersAndQueryParameters.getBlobHeaders().forEach(defaultOptions::addAllowedHeaderName); BlobHeadersAndQueryParameters.getBlobQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /** * Gets the endpoint for the blob service based on the parsed URL. * * @param parts The {@link BlobUrlParts} from the parse URL. * @return The endpoint for the blob service. */ public static String getEndpoint(BlobUrlParts parts) { if (ModelHelper.IP_V4_URL_PATTERN.matcher(parts.getHost()).find()) { return String.format("%s: } else { return String.format("%s: } } /** * Validates that the client is properly configured for using cpk. * * @param customerProvidedKey The cpk object. * @param endpoint The endpoint for the client. */ /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration, BlobServiceVersion serviceVersion) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration, serviceVersion); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. 
*/ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .addOptionalEcho(Constants.HeaderConstants.ENCRYPTION_KEY_SHA256) .build(); } }
Could you add a check before this call to ensure there is a credential set in the builder? The changes made here no longer have it throw an `IllegalArgumentException` when no credential has been set.
public BlobServiceAsyncClient buildAsyncClient() { BuilderHelper.validateCpk(customerProvidedKey, endpoint); BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline( storageSharedKeyCredential, tokenCredential, sasTokenCredential, endpoint, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); return new BlobServiceAsyncClient(pipeline, endpoint, serviceVersion, accountName, customerProvidedKey); }
HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(
public BlobServiceAsyncClient buildAsyncClient() { BuilderHelper.validateCpk(customerProvidedKey, endpoint); if (Objects.isNull(storageSharedKeyCredential) && Objects.isNull(tokenCredential) && Objects.isNull(sasTokenCredential)) { throw logger.logExceptionAsError(new IllegalArgumentException("Blob Service Client cannot be accessed " + "anonymously. Please provide a form of authentication")); } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline( storageSharedKeyCredential, tokenCredential, sasTokenCredential, endpoint, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); return new BlobServiceAsyncClient(pipeline, endpoint, serviceVersion, accountName, customerProvidedKey); }
class BlobServiceClientBuilder { private final ClientLogger logger = new ClientLogger(BlobServiceClientBuilder.class); private String endpoint; private String accountName; private CpkInfo customerProvidedKey; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private BlobServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link BlobServiceClient BlobServiceClients} * and {@link BlobServiceAsyncClient BlobServiceAsyncClients}. */ public BlobServiceClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * @return a {@link BlobServiceClient} created from the configurations in this builder. */ public BlobServiceClient buildClient() { return new BlobServiceClient(buildAsyncClient()); } /** * @return a {@link BlobServiceAsyncClient} created from the configurations in this builder. */ /** * Sets the blob service endpoint, additionally parses it for information (SAS token) * * @param endpoint URL of the service * @return the updated BlobServiceClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public BlobServiceClientBuilder endpoint(String endpoint) { try { BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint)); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); String sasToken = parts.getSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage endpoint url is malformed.")); } return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey Customer provided key containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. 
* @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public BlobServiceClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated BlobServiceClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ public BlobServiceClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ public BlobServiceClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public BlobServiceClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage whitelist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public BlobServiceClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class BlobServiceClientBuilder { private final ClientLogger logger = new ClientLogger(BlobServiceClientBuilder.class); private String endpoint; private String accountName; private CpkInfo customerProvidedKey; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private BlobServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link BlobServiceClient BlobServiceClients} * and {@link BlobServiceAsyncClient BlobServiceAsyncClients}. */ public BlobServiceClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * @return a {@link BlobServiceClient} created from the configurations in this builder. */ public BlobServiceClient buildClient() { return new BlobServiceClient(buildAsyncClient()); } /** * @return a {@link BlobServiceAsyncClient} created from the configurations in this builder. * @throws IllegalArgumentException If no credentials are provided. */ /** * Sets the blob service endpoint, additionally parses it for information (SAS token) * * @param endpoint URL of the service * @return the updated BlobServiceClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public BlobServiceClientBuilder endpoint(String endpoint) { try { BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint)); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); String sasToken = parts.getSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage endpoint url is malformed.")); } return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey Customer provided key containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. 
* @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public BlobServiceClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated BlobServiceClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ public BlobServiceClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ public BlobServiceClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public BlobServiceClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage whitelist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public BlobServiceClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
Good catch!
public BlobServiceAsyncClient buildAsyncClient() { BuilderHelper.validateCpk(customerProvidedKey, endpoint); BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline( storageSharedKeyCredential, tokenCredential, sasTokenCredential, endpoint, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); return new BlobServiceAsyncClient(pipeline, endpoint, serviceVersion, accountName, customerProvidedKey); }
HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(
public BlobServiceAsyncClient buildAsyncClient() { BuilderHelper.validateCpk(customerProvidedKey, endpoint); if (Objects.isNull(storageSharedKeyCredential) && Objects.isNull(tokenCredential) && Objects.isNull(sasTokenCredential)) { throw logger.logExceptionAsError(new IllegalArgumentException("Blob Service Client cannot be accessed " + "anonymously. Please provide a form of authentication")); } BlobServiceVersion serviceVersion = version != null ? version : BlobServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline( storageSharedKeyCredential, tokenCredential, sasTokenCredential, endpoint, retryOptions, logOptions, httpClient, additionalPolicies, configuration, serviceVersion); return new BlobServiceAsyncClient(pipeline, endpoint, serviceVersion, accountName, customerProvidedKey); }
class BlobServiceClientBuilder { private final ClientLogger logger = new ClientLogger(BlobServiceClientBuilder.class); private String endpoint; private String accountName; private CpkInfo customerProvidedKey; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private BlobServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link BlobServiceClient BlobServiceClients} * and {@link BlobServiceAsyncClient BlobServiceAsyncClients}. */ public BlobServiceClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * @return a {@link BlobServiceClient} created from the configurations in this builder. */ public BlobServiceClient buildClient() { return new BlobServiceClient(buildAsyncClient()); } /** * @return a {@link BlobServiceAsyncClient} created from the configurations in this builder. */ /** * Sets the blob service endpoint, additionally parses it for information (SAS token) * * @param endpoint URL of the service * @return the updated BlobServiceClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public BlobServiceClientBuilder endpoint(String endpoint) { try { BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint)); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); String sasToken = parts.getSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage endpoint url is malformed.")); } return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey Customer provided key containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. 
* @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public BlobServiceClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated BlobServiceClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ public BlobServiceClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ public BlobServiceClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public BlobServiceClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage whitelist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public BlobServiceClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
class BlobServiceClientBuilder { private final ClientLogger logger = new ClientLogger(BlobServiceClientBuilder.class); private String endpoint; private String accountName; private CpkInfo customerProvidedKey; private StorageSharedKeyCredential storageSharedKeyCredential; private TokenCredential tokenCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private BlobServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link BlobServiceClient BlobServiceClients} * and {@link BlobServiceAsyncClient BlobServiceAsyncClients}. */ public BlobServiceClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * @return a {@link BlobServiceClient} created from the configurations in this builder. */ public BlobServiceClient buildClient() { return new BlobServiceClient(buildAsyncClient()); } /** * @return a {@link BlobServiceAsyncClient} created from the configurations in this builder. * @throws IllegalArgumentException If no credentials are provided. */ /** * Sets the blob service endpoint, additionally parses it for information (SAS token) * * @param endpoint URL of the service * @return the updated BlobServiceClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is a malformed URL. 
*/ public BlobServiceClientBuilder endpoint(String endpoint) { try { BlobUrlParts parts = BlobUrlParts.parse(new URL(endpoint)); this.accountName = parts.getAccountName(); this.endpoint = BuilderHelper.getEndpoint(parts); String sasToken = parts.getSasQueryParameters().encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage endpoint url is malformed.")); } return this; } /** * Sets the {@link CustomerProvidedKey customer provided key} that is used to encrypt blob contents on the server. * * @param customerProvidedKey Customer provided key containing the encryption key information. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder customerProvidedKey(CustomerProvidedKey customerProvidedKey) { if (customerProvidedKey == null) { this.customerProvidedKey = null; } else { this.customerProvidedKey = new CpkInfo() .setEncryptionKey(customerProvidedKey.getKey()) .setEncryptionKeySha256(customerProvidedKey.getKeySha256()) .setEncryptionAlgorithm(customerProvidedKey.getEncryptionAlgorithm()); } return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.tokenCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the {@link TokenCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. 
* @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public BlobServiceClientBuilder credential(TokenCredential credential) { this.tokenCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.storageSharedKeyCredential = null; this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated BlobServiceClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public BlobServiceClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; this.tokenCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. * @return the updated BlobServiceClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. 
*/ public BlobServiceClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getBlobEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive blob service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. * * @param pipelinePolicy a pipeline policy * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. 
*/ public BlobServiceClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public BlobServiceClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default Storage whitelist log headers and query parameters. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated BlobServiceClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public BlobServiceClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link BlobServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link BlobServiceVersion} of the service to be used when making requests. * @return the updated BlobServiceClientBuilder object */ public BlobServiceClientBuilder serviceVersion(BlobServiceVersion version) { this.version = version; return this; } }
what happens if eventData already have the same property? Will it throw/override?
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventSpanContext = tracerProvider.startSpan(eventData.getContext(), ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return eventData; }
eventData.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventSpanContext = tracerProvider.startSpan(eventData.getContext(), ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return eventData; }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getSize() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData eventData} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link * EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? 
traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. */ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.getProperties() != null && !event.getProperties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.getProperties()); message.setApplicationProperties(applicationProperties); } if (event.getSystemProperties() != null) { event.getSystemProperties().forEach((key, value) -> { if 
(EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } if (event.getBody() != null) { message.setBody(new Data(Binary.create(event.getBody()))); } return message; } }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? 
traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. */ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.getProperties() != null && !event.getProperties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.getProperties()); message.setApplicationProperties(applicationProperties); } if (event.getSystemProperties() != null) { event.getSystemProperties().forEach((key, value) -> { if 
(EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } message.setBody(new Data(new Binary(event.getBody()))); return message; } }
I think the `properties` on eventData is a hashmap so it would just override. @conniey ^^
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventSpanContext = tracerProvider.startSpan(eventData.getContext(), ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return eventData; }
eventData.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventSpanContext = tracerProvider.startSpan(eventData.getContext(), ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return eventData; }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getSize() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData eventData} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link * EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? 
traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. */ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.getProperties() != null && !event.getProperties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.getProperties()); message.setApplicationProperties(applicationProperties); } if (event.getSystemProperties() != null) { event.getSystemProperties().forEach((key, value) -> { if 
(EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } if (event.getBody() != null) { message.setBody(new Data(Binary.create(event.getBody()))); } return message; } }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? 
traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. */ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.getProperties() != null && !event.getProperties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.getProperties()); message.setApplicationProperties(applicationProperties); } if (event.getSystemProperties() != null) { event.getSystemProperties().forEach((key, value) -> { if 
(EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } message.setBody(new Data(new Binary(event.getBody()))); return message; } }
Override
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventSpanContext = tracerProvider.startSpan(eventData.getContext(), ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return eventData; }
eventData.addProperty(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString());
private EventData traceMessageSpan(EventData eventData) { Optional<Object> eventContextData = eventData.getContext().getData(SPAN_CONTEXT_KEY); if (eventContextData.isPresent()) { return eventData; } else { Context eventSpanContext = tracerProvider.startSpan(eventData.getContext(), ProcessKind.MESSAGE); Optional<Object> eventDiagnosticIdOptional = eventSpanContext.getData(DIAGNOSTIC_ID_KEY); if (eventDiagnosticIdOptional.isPresent()) { eventData.getProperties().put(DIAGNOSTIC_ID_KEY, eventDiagnosticIdOptional.get().toString()); tracerProvider.endSpan(eventSpanContext, Signal.complete()); eventData.addContext(SPAN_CONTEXT_KEY, eventSpanContext); } } return eventData; }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getSize() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData eventData} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link * EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? 
traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, ErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. */ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.getProperties() != null && !event.getProperties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.getProperties()); message.setApplicationProperties(applicationProperties); } if (event.getSystemProperties() != null) { event.getSystemProperties().forEach((key, value) -> { if 
(EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } if (event.getBody() != null) { message.setBody(new Data(Binary.create(event.getBody()))); } return message; } }
class EventDataBatch { private final ClientLogger logger = new ClientLogger(EventDataBatch.class); private final Object lock = new Object(); private final int maxMessageSize; private final String partitionKey; private final ErrorContextProvider contextProvider; private final List<EventData> events; private final byte[] eventBytes; private final String partitionId; private int sizeInBytes; private final TracerProvider tracerProvider; EventDataBatch(int maxMessageSize, String partitionId, String partitionKey, ErrorContextProvider contextProvider, TracerProvider tracerProvider) { this.maxMessageSize = maxMessageSize; this.partitionKey = partitionKey; this.partitionId = partitionId; this.contextProvider = contextProvider; this.events = new LinkedList<>(); this.sizeInBytes = (maxMessageSize / 65536) * 1024; this.eventBytes = new byte[maxMessageSize]; this.tracerProvider = tracerProvider; } /** * Gets the number of {@link EventData events} in the batch. * * @return The number of {@link EventData events} in the batch. */ public int getCount() { return events.size(); } /** * Gets the size of the {@link EventDataBatch} in bytes. * * @return the size of the {@link EventDataBatch} in bytes. */ public int getSizeInBytes() { return this.sizeInBytes; } /** * Tries to add an {@link EventData event} to the batch. * * @param eventData The {@link EventData} to add to the batch. * @return {@code true} if the event could be added to the batch; {@code false} if the event was too large to fit in * the batch. * @throws IllegalArgumentException if {@code eventData} is {@code null}. * @throws AmqpException if {@code eventData} is larger than the maximum size of the {@link EventDataBatch}. */ public boolean tryAdd(final EventData eventData) { if (eventData == null) { throw logger.logExceptionAsWarning(new IllegalArgumentException("eventData cannot be null")); } EventData event = tracerProvider.isEnabled() ? 
traceMessageSpan(eventData) : eventData; final int size; try { size = getSize(event, events.isEmpty()); } catch (BufferOverflowException exception) { throw logger.logExceptionAsWarning(new AmqpException(false, AmqpErrorCondition.LINK_PAYLOAD_SIZE_EXCEEDED, String.format(Locale.US, "Size of the payload exceeded maximum message size: %s kb", maxMessageSize / 1024), contextProvider.getErrorContext())); } synchronized (lock) { if (this.sizeInBytes + size > this.maxMessageSize) { return false; } this.sizeInBytes += size; } this.events.add(event); return true; } /** * Method to start and end a "Azure.EventHubs.message" span and add the "DiagnosticId" as a property of the message. * * @param eventData The Event to add tracing span for. * @return the updated event data object. */ List<EventData> getEvents() { return events; } String getPartitionKey() { return partitionKey; } String getPartitionId() { return partitionId; } private int getSize(final EventData eventData, final boolean isFirst) { Objects.requireNonNull(eventData, "'eventData' cannot be null."); final Message amqpMessage = createAmqpMessage(eventData, partitionKey); int eventSize = amqpMessage.encode(this.eventBytes, 0, maxMessageSize); eventSize += 16; if (isFirst) { amqpMessage.setBody(null); amqpMessage.setApplicationProperties(null); amqpMessage.setProperties(null); amqpMessage.setDeliveryAnnotations(null); eventSize += amqpMessage.encode(this.eventBytes, 0, maxMessageSize); } return eventSize; } /* * Creates the AMQP message represented by the event data */ private Message createAmqpMessage(EventData event, String partitionKey) { final Message message = Proton.message(); if (event.getProperties() != null && !event.getProperties().isEmpty()) { final ApplicationProperties applicationProperties = new ApplicationProperties(event.getProperties()); message.setApplicationProperties(applicationProperties); } if (event.getSystemProperties() != null) { event.getSystemProperties().forEach((key, value) -> { if 
(EventData.RESERVED_SYSTEM_PROPERTIES.contains(key)) { return; } final MessageConstant constant = MessageConstant.fromString(key); if (constant != null) { switch (constant) { case MESSAGE_ID: message.setMessageId(value); break; case USER_ID: message.setUserId((byte[]) value); break; case TO: message.setAddress((String) value); break; case SUBJECT: message.setSubject((String) value); break; case REPLY_TO: message.setReplyTo((String) value); break; case CORRELATION_ID: message.setCorrelationId(value); break; case CONTENT_TYPE: message.setContentType((String) value); break; case CONTENT_ENCODING: message.setContentEncoding((String) value); break; case ABSOLUTE_EXPIRY_TIME: message.setExpiryTime((long) value); break; case CREATION_TIME: message.setCreationTime((long) value); break; case GROUP_ID: message.setGroupId((String) value); break; case GROUP_SEQUENCE: message.setGroupSequence((long) value); break; case REPLY_TO_GROUP_ID: message.setReplyToGroupId((String) value); break; default: throw logger.logExceptionAsWarning(new IllegalArgumentException(String.format(Locale.US, "Property is not a recognized reserved property name: %s", key))); } } else { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(Symbol.getSymbol(key), value); message.setMessageAnnotations(messageAnnotations); } }); } if (partitionKey != null) { final MessageAnnotations messageAnnotations = (message.getMessageAnnotations() == null) ? new MessageAnnotations(new HashMap<>()) : message.getMessageAnnotations(); messageAnnotations.getValue().put(AmqpConstants.PARTITION_KEY, partitionKey); message.setMessageAnnotations(messageAnnotations); } message.setBody(new Data(new Binary(event.getBody()))); return message; } }
same as above
private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = BlobUrlParts.parse( DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString(); return new BlobContainerClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); }
DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString();
private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs"); return new BlobContainerClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); }
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = BlobUrlParts.parse( DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString(); return new SpecializedBlobClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ }
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs"); return new SpecializedBlobClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ }
please don't call this a regex
/**
 * Rewrites an endpoint so that its {@code ".dfs."}-style host segment points at the desired
 * service (e.g. converts a "dfs" endpoint to the equivalent "blob" endpoint).
 *
 * @param endpoint the endpoint URL to convert.
 * @param desiredEndpoint the service segment the result should contain (e.g. {@code "blob"}).
 * @param currentEndpoint the service segment expected in {@code endpoint} (e.g. {@code "dfs"}).
 * @return {@code endpoint} unchanged if it already contains the desired segment, otherwise the
 * endpoint with the first occurrence of the current segment replaced by the desired one.
 */
public static String endpointToDesiredEndpoint(String endpoint, String desiredEndpoint, String currentEndpoint) {
    // These are plain substrings ("​.blob." / ".dfs."), not regexes.
    String desiredStringToMatch = "." + desiredEndpoint + ".";
    String currentStringToMatch = "." + currentEndpoint + ".";
    if (endpoint.contains(desiredStringToMatch)) {
        // Already points at the desired service.
        return endpoint;
    } else {
        // replaceFirst takes a regex: quote both sides so "." is matched and
        // emitted literally instead of acting as the any-character wildcard.
        return endpoint.replaceFirst(java.util.regex.Pattern.quote(currentStringToMatch),
            java.util.regex.Matcher.quoteReplacement(desiredStringToMatch));
    }
}
String desiredRegex = "." + desiredEndpoint + "."; // NOTE(review): a plain substring marker (".blob."), not a regex — the name is misleading
/**
 * Rewrites an endpoint so that its {@code ".dfs."}-style host segment points at the desired
 * service (e.g. converts a "dfs" endpoint to the equivalent "blob" endpoint).
 *
 * @param endpoint the endpoint URL to convert.
 * @param desiredEndpoint the service segment the result should contain (e.g. {@code "blob"}).
 * @param currentEndpoint the service segment expected in {@code endpoint} (e.g. {@code "dfs"}).
 * @return {@code endpoint} unchanged if it already contains the desired segment, otherwise the
 * endpoint with the first occurrence of the current segment replaced by the desired one.
 */
public static String endpointToDesiredEndpoint(String endpoint, String desiredEndpoint, String currentEndpoint) {
    String desiredStringToMatch = "." + desiredEndpoint + ".";
    String currentStringToMatch = "." + currentEndpoint + ".";
    if (endpoint.contains(desiredStringToMatch)) {
        return endpoint;
    } else {
        // replaceFirst interprets its first argument as a regex, where "." matches any
        // character; quote both sides so the dots are treated literally.
        return endpoint.replaceFirst(java.util.regex.Pattern.quote(currentStringToMatch),
            java.util.regex.Matcher.quoteReplacement(desiredStringToMatch));
    }
}
/** Internal utility holder shared by the Data Lake clients; no instance state. */
class DataLakeImplUtils { }
/** Internal utility holder shared by the Data Lake clients; no instance state. */
class DataLakeImplUtils { }
Make this as simple as the other helpers — drop the `BlobUrlParts.parse(...).toUrl().toString()` round-trip and use the converted endpoint string directly.
/**
 * Initializes a {@link SpecializedBlobClientBuilder} targeting the blob endpoint equivalent
 * of the given dfs endpoint.
 *
 * @param dfsEndpoint the dfs endpoint to convert and use.
 * @param pipeline the {@link HttpPipeline} for the builder.
 * @return the configured {@link SpecializedBlobClientBuilder}.
 */
private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) {
    // endpointToDesiredEndpoint already yields a usable URL string; the former
    // BlobUrlParts.parse(...).toUrl().toString() round-trip was redundant and
    // inconsistent with the sibling builder helpers.
    String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs");
    return new SpecializedBlobClientBuilder()
        .pipeline(pipeline)
        .endpoint(blobEndpoint)
        .serviceVersion(BlobServiceVersion.getLatest());
}
DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString();
/**
 * Builds a {@link SpecializedBlobClientBuilder} whose endpoint is the blob-service twin of
 * the supplied dfs endpoint.
 *
 * @param dfsEndpoint the dfs endpoint to translate.
 * @param pipeline the {@link HttpPipeline} the builder should use.
 * @return the configured {@link SpecializedBlobClientBuilder}.
 */
private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) {
    // Lease operations are serviced by the blob endpoint, so swap ".dfs." for ".blob.".
    String convertedEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs");
    SpecializedBlobClientBuilder specializedBuilder = new SpecializedBlobClientBuilder();
    specializedBuilder.pipeline(pipeline);
    specializedBuilder.endpoint(convertedEndpoint);
    specializedBuilder.serviceVersion(BlobServiceVersion.getLatest());
    return specializedBuilder;
}
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = BlobUrlParts.parse( DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString(); return new BlobContainerClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } }
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs"); return new BlobContainerClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } }
Done.
/**
 * Initializes a {@link SpecializedBlobClientBuilder} targeting the blob endpoint equivalent
 * of the given dfs endpoint.
 *
 * @param dfsEndpoint the dfs endpoint to convert and use.
 * @param pipeline the {@link HttpPipeline} for the builder.
 * @return the configured {@link SpecializedBlobClientBuilder}.
 */
private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) {
    // endpointToDesiredEndpoint already yields a usable URL string; the former
    // BlobUrlParts.parse(...).toUrl().toString() round-trip was redundant and
    // inconsistent with the sibling builder helpers.
    String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs");
    return new SpecializedBlobClientBuilder()
        .pipeline(pipeline)
        .endpoint(blobEndpoint)
        .serviceVersion(BlobServiceVersion.getLatest());
}
DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString();
/**
 * Builds a {@link SpecializedBlobClientBuilder} whose endpoint is the blob-service twin of
 * the supplied dfs endpoint.
 *
 * @param dfsEndpoint the dfs endpoint to translate.
 * @param pipeline the {@link HttpPipeline} the builder should use.
 * @return the configured {@link SpecializedBlobClientBuilder}.
 */
private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) {
    // Lease operations are serviced by the blob endpoint, so swap ".dfs." for ".blob.".
    String convertedEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs");
    SpecializedBlobClientBuilder specializedBuilder = new SpecializedBlobClientBuilder();
    specializedBuilder.pipeline(pipeline);
    specializedBuilder.endpoint(convertedEndpoint);
    specializedBuilder.serviceVersion(BlobServiceVersion.getLatest());
    return specializedBuilder;
}
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = BlobUrlParts.parse( DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString(); return new BlobContainerClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } }
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs"); return new BlobContainerClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } }
Done.
/**
 * Initializes a {@link BlobContainerClientBuilder} targeting the blob endpoint equivalent
 * of the given dfs endpoint.
 *
 * @param dfsEndpoint the dfs endpoint to convert and use.
 * @param pipeline the {@link HttpPipeline} for the builder.
 * @return the configured {@link BlobContainerClientBuilder}.
 */
private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) {
    // endpointToDesiredEndpoint already yields a usable URL string; the former
    // BlobUrlParts.parse(...).toUrl().toString() round-trip was redundant and
    // inconsistent with the sibling builder helpers.
    String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs");
    return new BlobContainerClientBuilder()
        .pipeline(pipeline)
        .endpoint(blobEndpoint)
        .serviceVersion(BlobServiceVersion.getLatest());
}
DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString();
/**
 * Builds a {@link BlobContainerClientBuilder} whose endpoint is the blob-service twin of
 * the supplied dfs endpoint.
 *
 * @param dfsEndpoint the dfs endpoint to translate.
 * @param pipeline the {@link HttpPipeline} the builder should use.
 * @return the configured {@link BlobContainerClientBuilder}.
 */
private BlobContainerClientBuilder getBlobContainerClientBuilder(String dfsEndpoint, HttpPipeline pipeline) {
    // Container lease operations go through the blob endpoint, so swap ".dfs." for ".blob.".
    String convertedEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs");
    BlobContainerClientBuilder containerBuilder = new BlobContainerClientBuilder();
    containerBuilder.pipeline(pipeline);
    containerBuilder.endpoint(convertedEndpoint);
    containerBuilder.serviceVersion(BlobServiceVersion.getLatest());
    return containerBuilder;
}
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = BlobUrlParts.parse( DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs")).toUrl().toString(); return new SpecializedBlobClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ }
class DataLakeLeaseClientBuilder { final BlobLeaseClientBuilder blobLeaseClientBuilder; /** * Creates a new instance of {@link DataLakeLeaseClientBuilder}. */ public DataLakeLeaseClientBuilder() { blobLeaseClientBuilder = new BlobLeaseClientBuilder(); } /** * Creates a {@link DataLakeLeaseClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseClient} based on the configurations in this builder. */ public DataLakeLeaseClient buildClient() { return new DataLakeLeaseClient(blobLeaseClientBuilder.buildClient()); } /** * Creates a {@link DataLakeLeaseAsyncClient} based on the configurations set in the builder. * * @return a {@link DataLakeLeaseAsyncClient} based on the configurations in this builder. */ public DataLakeLeaseAsyncClient buildAsyncClient() { return new DataLakeLeaseAsyncClient(blobLeaseClientBuilder.buildAsyncClient()); } /** * Configures the builder based on the passed {@link DataLakeFileClient}. This will set the {@link HttpPipeline} and * {@link URL} that are used to interact with the service. * * @param dataLakeFileClient DataLakeFileClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileClient(DataLakeFileClient dataLakeFileClient) { Objects.requireNonNull(dataLakeFileClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeFileClient.getFileUrl(), dataLakeFileClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileAsyncClient DataLakeFileAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeFileAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileAsyncClient(DataLakeFileAsyncClient dataLakeFileAsyncClient) { Objects.requireNonNull(dataLakeFileAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeFileAsyncClient.getFileUrl(), dataLakeFileAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryClient DataLakeDirectoryClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryClient} is {@code null}. */ public DataLakeLeaseClientBuilder directoryClient(DataLakeDirectoryClient dataLakeDirectoryClient) { Objects.requireNonNull(dataLakeDirectoryClient); blobLeaseClientBuilder.blobClient( getSpecializedBlobClientBuilder(dataLakeDirectoryClient.getDirectoryUrl(), dataLakeDirectoryClient.getHttpPipeline()).buildBlockBlobClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeDirectoryAsyncClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeDirectoryAsyncClient DataLakeDirectoryAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code DataLakeDirectoryAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder directoryAsyncClient(DataLakeDirectoryAsyncClient dataLakeDirectoryAsyncClient) { Objects.requireNonNull(dataLakeDirectoryAsyncClient); blobLeaseClientBuilder.blobAsyncClient( getSpecializedBlobClientBuilder(dataLakeDirectoryAsyncClient.getDirectoryUrl(), dataLakeDirectoryAsyncClient.getHttpPipeline()).buildBlockBlobAsyncClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemClient}. This will set the * {@link HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemClient DataLakeFileSystemClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemClient} is {@code null}. */ public DataLakeLeaseClientBuilder fileSystemClient(DataLakeFileSystemClient dataLakeFileSystemClient) { Objects.requireNonNull(dataLakeFileSystemClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemClient.getFileSystemUrl(), dataLakeFileSystemClient.getHttpPipeline()).buildClient()); return this; } /** * Configures the builder based on the passed {@link DataLakeFileSystemAsyncClient}. This will set the {@link * HttpPipeline} and {@link URL} that are used to interact with the service. * * @param dataLakeFileSystemAsyncClient DataLakeFileSystemAsyncClient used to configure the builder. * @return the updated DataLakeLeaseClientBuilder object * @throws NullPointerException If {@code dataLakeFileSystemAsyncClient} is {@code null}. 
*/ public DataLakeLeaseClientBuilder fileSystemAsyncClient( DataLakeFileSystemAsyncClient dataLakeFileSystemAsyncClient) { Objects.requireNonNull(dataLakeFileSystemAsyncClient); blobLeaseClientBuilder.containerClient( getBlobContainerClientBuilder(dataLakeFileSystemAsyncClient.getFileSystemUrl(), dataLakeFileSystemAsyncClient.getHttpPipeline()).buildClient()); return this; } /** * Sets the identifier for the lease. * * <p>If a lease ID isn't set then a {@link UUID} will be used.</p> * * @param leaseId Identifier for the lease. * @return the updated DataLakeLeaseClientBuilder object */ public DataLakeLeaseClientBuilder leaseId(String leaseId) { blobLeaseClientBuilder.leaseId(leaseId); return this; } /** * Initializes a {@link SpecializedBlobClientBuilder} * @param dfsEndpoint The endpoint for the {@link SpecializedBlobClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link SpecializedBlobClientBuilder} * @return the {@link SpecializedBlobClientBuilder} */ private SpecializedBlobClientBuilder getSpecializedBlobClientBuilder(String dfsEndpoint, HttpPipeline pipeline) { String blobEndpoint = DataLakeImplUtils.endpointToDesiredEndpoint(dfsEndpoint, "blob", "dfs"); return new SpecializedBlobClientBuilder() .pipeline(pipeline) .endpoint(blobEndpoint) .serviceVersion(BlobServiceVersion.getLatest()); } /** * Initializes a {@link BlobContainerClientBuilder} * @param dfsEndpoint The endpoint for the {@link BlobContainerClientBuilder} * @param pipeline The {@link HttpPipeline} for the {@link BlobContainerClientBuilder} * @return the {@link BlobContainerClientBuilder} */ }
changed to stringToMatch
public static String endpointToDesiredEndpoint(String endpoint, String desiredEndpoint, String currentEndpoint) { String desiredRegex = "." + desiredEndpoint + "."; String currentRegex = "." + currentEndpoint + "."; if (endpoint.contains(desiredRegex)) { return endpoint; } else { return endpoint.replaceFirst(currentRegex, desiredRegex); } }
String desiredRegex = "." + desiredEndpoint + ".";
public static String endpointToDesiredEndpoint(String endpoint, String desiredEndpoint, String currentEndpoint) { String desiredStringToMatch = "." + desiredEndpoint + "."; String currentStringToMatch = "." + currentEndpoint + "."; if (endpoint.contains(desiredStringToMatch)) { return endpoint; } else { return endpoint.replaceFirst(currentStringToMatch, desiredStringToMatch); } }
class DataLakeImplUtils { }
class DataLakeImplUtils { }
thoughts on moving 429 and 503 checks to `RetryStrategy::calculateRetryDelay(HttpResponse, int)` as well? this way we ensure `RetryStrategy::calculateRetryDelay(HttpResponse, int)` is always gets called
private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryStrategy.calculateRetryDelay(tryCount); } return retryStrategy.calculateRetryDelay(response, tryCount); }
&& code != 503) {
private Duration determineDelayDuration(HttpResponse response, int tryCount) { int code = response.getStatusCode(); if (code != 429 && code != 503) { return retryStrategy.calculateRetryDelay(tryCount); } String retryHeaderValue = null; if (!isNullOrEmpty(this.retryAfterHeader)) { retryHeaderValue = response.getHeaderValue(this.retryAfterHeader); } if (isNullOrEmpty(retryHeaderValue)) { return this.retryStrategy.calculateRetryDelay(tryCount); } return Duration.of(Integer.parseInt(retryHeaderValue), this.retryAfterTimeUnit); }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryStrategy retryStrategy; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy} and use * 'retry-after-ms' in {@link HttpResponse} header for calculating retry delay. */ public RetryPolicy() { this(new ExponentialBackoff()); } /** * Creates a {@link RetryPolicy} with the provided {@link RetryStrategy}. * * @param retryStrategy The {@link RetryStrategy} used for retries. */ public RetryPolicy(RetryStrategy retryStrategy) { this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null"); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryStrategy.getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryStrategy.calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. 
Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ }
class RetryPolicy implements HttpPipelinePolicy { private final ClientLogger logger = new ClientLogger(RetryPolicy.class); private final RetryStrategy retryStrategy; private final String retryAfterHeader; private final ChronoUnit retryAfterTimeUnit; /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy} and ignore the * delay provided in response header. */ public RetryPolicy() { this(new ExponentialBackoff(), null, null); } /** * Creates {@link RetryPolicy} with default {@link ExponentialBackoff} as {@link RetryStrategy} and use * provided {@code retryAfterHeader} in {@link HttpResponse} headers for calculating retry delay. * * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay. If the value is {@code null}, {@link RetryPolicy} will use the retry strategy to compute the delay * and ignore the delay provided in response header. * @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * if, {@code retryAfterHeader} is {@code null}. * @throws NullPointerException When {@code retryAfterTimeUnit} is {@code null} and {@code retryAfterHeader} is * not {@code null}. */ public RetryPolicy(String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this(new ExponentialBackoff(), retryAfterHeader, retryAfterTimeUnit); } /** * Creates {@link RetryPolicy} with the provided {@link RetryStrategy} and default {@link ExponentialBackoff} * as {@link RetryStrategy}. It will use provided {@code retryAfterHeader} in {@link HttpResponse} headers for * calculating retry delay. * * @param retryStrategy The {@link RetryStrategy} used for retries. * @param retryAfterHeader The HTTP header, such as 'Retry-After' or 'x-ms-retry-after-ms', to lookup for the * retry delay. If the value is {@code null}, {@link RetryPolicy} will use the retry strategy to compute the delay * and ignore the delay provided in response header. 
* @param retryAfterTimeUnit The time unit to use when applying the retry delay. {@code null} is valid if, and only * if, {@code retryAfterHeader} is {@code null}. * * @throws NullPointerException When {@code retryStrategy} is {@code null}. Also when {@code retryAfterTimeUnit} * is {@code null} and {@code retryAfterHeader} is not {@code null}. */ public RetryPolicy(RetryStrategy retryStrategy, String retryAfterHeader, ChronoUnit retryAfterTimeUnit) { this.retryStrategy = Objects.requireNonNull(retryStrategy, "'retryStrategy' cannot be null."); this.retryAfterHeader = retryAfterHeader; this.retryAfterTimeUnit = retryAfterTimeUnit; if (!isNullOrEmpty(retryAfterHeader)) { Objects.requireNonNull(retryAfterTimeUnit, "'retryAfterTimeUnit' cannot be null."); } } /** * Creates a {@link RetryPolicy} with the provided {@link RetryStrategy} and ignore the delay provided in * response header. * * @param retryStrategy The {@link RetryStrategy} used for retries. * * @throws NullPointerException When {@code retryStrategy} is {@code null}. 
*/ public RetryPolicy(RetryStrategy retryStrategy) { this(retryStrategy, null, null); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { return attemptAsync(context, next, context.getHttpRequest(), 0); } private Mono<HttpResponse> attemptAsync(final HttpPipelineCallContext context, final HttpPipelineNextPolicy next, final HttpRequest originalHttpRequest, final int tryCount) { context.setHttpRequest(originalHttpRequest.copy()); return next.clone().process() .flatMap(httpResponse -> { if (shouldRetry(httpResponse, tryCount)) { final Duration delayDuration = determineDelayDuration(httpResponse, tryCount); logger.verbose("[Retrying] Try count: {}, Delay duration in seconds: {}", tryCount, delayDuration.getSeconds()); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(delayDuration); } else { return Mono.just(httpResponse); } }) .onErrorResume(err -> { int maxRetries = retryStrategy.getMaxRetries(); if (tryCount < maxRetries) { logger.verbose("[Error Resume] Try count: {}, Error: {}", tryCount, err); return attemptAsync(context, next, originalHttpRequest, tryCount + 1) .delaySubscription(retryStrategy.calculateRetryDelay(tryCount)); } else { return Mono.error(new RuntimeException( String.format("Max retries %d times exceeded. Error Details: %s", maxRetries, err.getMessage()), err)); } }); } private boolean shouldRetry(HttpResponse response, int tryCount) { return tryCount < retryStrategy.getMaxRetries() && retryStrategy.shouldRetry(response); } /** * Determines the delay duration that should be waited before retrying. * @param response HTTP response * @return If the HTTP response has a retry-after-ms header that will be returned, * otherwise the duration used during the construction of the policy. */ }
Looks like by default we handle following headers in `RetryStrategy` default method: ``` retry-after-ms x-ms-retry-after-ms ``` so I guess we don't need this customization in AzConfig?
public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); String buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); ConfigurationServiceVersion serviceVersion = version != null ? version : ConfigurationServiceVersion.getLatest(); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { throw logger.logExceptionAsWarning(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration, serviceVersion)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); RetryPolicy defaultRetryPolicy = new RetryPolicy(new ExponentialBackoff() { @Override public Duration calculateRetryDelay(HttpResponse httpResponse, int retryAttempts) { String delay = httpResponse.getHeaderValue(RETRY_AFTER_MS_HEADER); if (delay != null) { return Duration.ofMillis(Long.parseLong(delay)); } else { return calculateRetryDelay(retryAttempts); } } }); policies.add(retryPolicy == null ? 
defaultRetryPolicy : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); }
String delay = httpResponse.getHeaderValue(RETRY_AFTER_MS_HEADER);
public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); String buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); ConfigurationServiceVersion serviceVersion = version != null ? version : ConfigurationServiceVersion.getLatest(); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { throw logger.logExceptionAsWarning(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-appconfig.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private ConfigurationClientCredentials credential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. 
*/ public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. 
* * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (CoreUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private String getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.getBaseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-appconfig.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private ConfigurationClientCredentials credential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. 
* * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. 
* @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. 
*/ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (CoreUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private String getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.getBaseUri(); } else { return null; } } }
Based on what we discussed today, we will use only one response header, to avoid any complexity in core. Header name: x-ms-retry-after-ms
/**
 * Builds a new {@link ConfigurationAsyncClient} from the options set on this builder.
 *
 * <p>If an explicit {@code pipeline} was supplied, it is used as-is and all other pipeline
 * settings are ignored. Otherwise a pipeline is assembled from the standard policies plus a
 * retry policy that honors the service's {@code retry-after-ms} response header.</p>
 *
 * @return A ConfigurationAsyncClient configured from this builder.
 * @throws NullPointerException If no endpoint could be resolved from the builder or environment.
 * @throws IllegalStateException If no credential was configured.
 */
public ConfigurationAsyncClient buildAsyncClient() {
    // Fall back to a snapshot of the global configuration when none was supplied.
    Configuration buildConfiguration = (configuration == null)
        ? Configuration.getGlobalConfiguration().clone()
        : configuration;
    ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration);
    String buildEndpoint = getBuildEndpoint(configurationCredentials);

    Objects.requireNonNull(buildEndpoint);

    ConfigurationServiceVersion serviceVersion = version != null ? version : ConfigurationServiceVersion.getLatest();

    // A user-supplied pipeline short-circuits all other builder settings.
    if (pipeline != null) {
        return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion);
    }

    ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential;
    if (buildCredential == null) {
        throw logger.logExceptionAsWarning(new IllegalStateException("'credential' is required."));
    }

    // Closest to API goes first, closest to wire goes last.
    final List<HttpPipelinePolicy> policies = new ArrayList<>();

    policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion,
        buildConfiguration, serviceVersion));
    policies.add(new RequestIdPolicy());
    policies.add(new AddHeadersPolicy(headers));
    policies.add(new AddDatePolicy());
    policies.add(new ConfigurationCredentialsPolicy(buildCredential));
    HttpPolicyProviders.addBeforeRetryPolicies(policies);

    // Default retry strategy: prefer the service-provided "retry-after-ms" delay when present
    // and well-formed; otherwise delegate to the inherited exponential-backoff calculation.
    RetryPolicy defaultRetryPolicy = new RetryPolicy(new ExponentialBackoff() {
        @Override
        public Duration calculateRetryDelay(HttpResponse httpResponse, int retryAttempts) {
            String delay = httpResponse.getHeaderValue(RETRY_AFTER_MS_HEADER);
            if (delay != null) {
                try {
                    return Duration.ofMillis(Long.parseLong(delay));
                } catch (NumberFormatException ignored) {
                    // Malformed header value; fall through to exponential backoff below
                    // instead of propagating an exception out of the retry machinery.
                }
            }
            // Calls the inherited single-argument overload from ExponentialBackoff.
            return calculateRetryDelay(retryAttempts);
        }
    });
    policies.add(retryPolicy == null ? defaultRetryPolicy : retryPolicy);

    policies.addAll(this.policies);
    HttpPolicyProviders.addAfterRetryPolicies(policies);
    policies.add(new HttpLoggingPolicy(httpLogOptions));

    HttpPipeline pipeline = new HttpPipelineBuilder()
        .policies(policies.toArray(new HttpPipelinePolicy[0]))
        .httpClient(httpClient)
        .build();

    return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion);
}
String delay = httpResponse.getHeaderValue(RETRY_AFTER_MS_HEADER);
public ConfigurationAsyncClient buildAsyncClient() { Configuration buildConfiguration = (configuration == null) ? Configuration.getGlobalConfiguration().clone() : configuration; ConfigurationClientCredentials configurationCredentials = getConfigurationCredentials(buildConfiguration); String buildEndpoint = getBuildEndpoint(configurationCredentials); Objects.requireNonNull(buildEndpoint); ConfigurationServiceVersion serviceVersion = version != null ? version : ConfigurationServiceVersion.getLatest(); if (pipeline != null) { return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); } ConfigurationClientCredentials buildCredential = (credential == null) ? configurationCredentials : credential; if (buildCredential == null) { throw logger.logExceptionAsWarning(new IllegalStateException("'credential' is required.")); } final List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(new UserAgentPolicy(httpLogOptions.getApplicationId(), clientName, clientVersion, buildConfiguration)); policies.add(new RequestIdPolicy()); policies.add(new AddHeadersPolicy(headers)); policies.add(new AddDatePolicy()); policies.add(new ConfigurationCredentialsPolicy(buildCredential)); HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(retryPolicy == null ? DEFAULT_RETRY_POLICY : retryPolicy); policies.addAll(this.policies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(new HttpLoggingPolicy(httpLogOptions)); HttpPipeline pipeline = new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); return new ConfigurationAsyncClient(buildEndpoint, pipeline, serviceVersion); }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-appconfig.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private ConfigurationClientCredentials credential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. * * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. 
* @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code credential} is {@code null}. 
*/ public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. 
* * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (CoreUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private String getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.getBaseUri(); } else { return null; } } }
class ConfigurationClientBuilder { private static final String ECHO_REQUEST_ID_HEADER = "x-ms-return-client-request-id"; private static final String CONTENT_TYPE_HEADER = "Content-Type"; private static final String CONTENT_TYPE_HEADER_VALUE = "application/json"; private static final String ACCEPT_HEADER = "Accept"; private static final String ACCEPT_HEADER_VALUE = "application/vnd.microsoft.azconfig.kv+json"; private static final String APP_CONFIG_PROPERTIES = "azure-appconfig.properties"; private static final String NAME = "name"; private static final String VERSION = "version"; private static final RetryPolicy DEFAULT_RETRY_POLICY = new RetryPolicy("retry-after-ms", ChronoUnit.MILLIS); private final ClientLogger logger = new ClientLogger(ConfigurationClientBuilder.class); private final List<HttpPipelinePolicy> policies; private final HttpHeaders headers; private final String clientName; private final String clientVersion; private ConfigurationClientCredentials credential; private String endpoint; private HttpClient httpClient; private HttpLogOptions httpLogOptions; private HttpPipeline pipeline; private HttpPipelinePolicy retryPolicy; private Configuration configuration; private ConfigurationServiceVersion version; /** * The constructor with defaults. */ public ConfigurationClientBuilder() { policies = new ArrayList<>(); httpLogOptions = new HttpLogOptions(); Map<String, String> properties = CoreUtils.getProperties(APP_CONFIG_PROPERTIES); clientName = properties.getOrDefault(NAME, "UnknownName"); clientVersion = properties.getOrDefault(VERSION, "UnknownVersion"); headers = new HttpHeaders() .put(ECHO_REQUEST_ID_HEADER, "true") .put(CONTENT_TYPE_HEADER, CONTENT_TYPE_HEADER_VALUE) .put(ACCEPT_HEADER, ACCEPT_HEADER_VALUE); } /** * Creates a {@link ConfigurationClient} based on options set in the Builder. Every time {@code buildClient()} is * called a new instance of {@link ConfigurationClient} is created. 
* * <p> * If {@link * {@link * settings are ignored.</p> * * @return A ConfigurationClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ public ConfigurationClient buildClient() { return new ConfigurationClient(buildAsyncClient()); } /** * Creates a {@link ConfigurationAsyncClient} based on options set in the Builder. Every time * {@code buildAsyncClient()} is called a new instance of {@link ConfigurationAsyncClient} is created. * * <p> * If {@link * {@link * builder settings are ignored. * </p> * * @return A ConfigurationAsyncClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} has not been set. This setting is automatically set when * {@link * explicitly by calling {@link * @throws IllegalStateException If {@link */ /** * Sets the service endpoint for the Azure App Configuration instance. * * @param endpoint The URL of the Azure App Configuration instance to send {@link ConfigurationSetting} * service requests to and receive responses from. * @return The updated ConfigurationClientBuilder object. * @throws IllegalArgumentException if {@code endpoint} is null or it cannot be parsed into a valid URL. */ public ConfigurationClientBuilder endpoint(String endpoint) { try { new URL(endpoint); } catch (MalformedURLException ex) { throw logger.logExceptionAsWarning(new IllegalArgumentException("'endpoint' must be a valid URL")); } this.endpoint = endpoint; return this; } /** * Sets the credential to use when authenticating HTTP requests. Also, sets the {@link * for this ConfigurationClientBuilder. * * @param connectionString Connection string in the format "endpoint={endpoint_value};id={id_value}; * secret={secret_value}" * @return The updated ConfigurationClientBuilder object. 
* @throws NullPointerException If {@code credential} is {@code null}. */ public ConfigurationClientBuilder connectionString(String connectionString) { Objects.requireNonNull(connectionString); try { this.credential = new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException err) { throw logger.logExceptionAsError(new IllegalArgumentException( "The secret is invalid and cannot instantiate the HMAC-SHA256 algorithm.", err)); } catch (NoSuchAlgorithmException err) { throw logger.logExceptionAsError( new IllegalArgumentException("HMAC-SHA256 MAC algorithm cannot be instantiated.", err)); } this.endpoint = credential.getBaseUri(); return this; } /** * Sets the logging configuration for HTTP requests and responses. * * <p> If logLevel is not provided, default value of {@link HttpLogDetailLevel * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder httpLogOptions(HttpLogOptions logOptions) { httpLogOptions = logOptions; return this; } /** * Adds a policy to the set of existing policies that are executed after required policies. * * @param policy The retry policy for service requests. * @return The updated ConfigurationClientBuilder object. * @throws NullPointerException If {@code policy} is {@code null}. */ public ConfigurationClientBuilder addPolicy(HttpPipelinePolicy policy) { Objects.requireNonNull(policy); policies.add(policy); return this; } /** * Sets the HTTP client to use for sending and receiving requests to and from the service. * * @param client The HTTP client to use for requests. * @return The updated ConfigurationClientBuilder object. 
*/ public ConfigurationClientBuilder httpClient(HttpClient client) { if (this.httpClient != null && client == null) { logger.info("HttpClient is being set to 'null' when it was previously configured."); } this.httpClient = client; return this; } /** * Sets the HTTP pipeline to use for the service client. * * If {@code pipeline} is set, all other settings are ignored, aside from * {@link ConfigurationClientBuilder * ConfigurationClient}. * * @param pipeline The HTTP pipeline to use for sending service requests and receiving responses. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder pipeline(HttpPipeline pipeline) { if (this.pipeline != null && pipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.pipeline = pipeline; return this; } /** * Sets the configuration store that is used during construction of the service client. * * The default configuration store is a clone of the {@link Configuration * configuration store}, use {@link Configuration * * @param configuration The configuration store used to * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the {@link HttpPipelinePolicy} that is used when each request is sent. * * The default retry policy will be used if not provided {@link ConfigurationClientBuilder * to build {@link ConfigurationAsyncClient} or {@link ConfigurationClient}. * @param retryPolicy user's retry policy applied to each request. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder retryPolicy(HttpPipelinePolicy retryPolicy) { this.retryPolicy = retryPolicy; return this; } /** * Sets the {@link ConfigurationServiceVersion} that is used when making API requests. 
* <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ConfigurationServiceVersion} of the service to be used when making requests. * @return The updated ConfigurationClientBuilder object. */ public ConfigurationClientBuilder serviceVersion(ConfigurationServiceVersion version) { this.version = version; return this; } private ConfigurationClientCredentials getConfigurationCredentials(Configuration configuration) { String connectionString = configuration.get("AZURE_APPCONFIG_CONNECTION_STRING"); if (CoreUtils.isNullOrEmpty(connectionString)) { return credential; } try { return new ConfigurationClientCredentials(connectionString); } catch (InvalidKeyException | NoSuchAlgorithmException ex) { return null; } } private String getBuildEndpoint(ConfigurationClientCredentials buildCredentials) { if (endpoint != null) { return endpoint; } else if (buildCredentials != null) { return buildCredentials.getBaseUri(); } else { return null; } } }
We are using HttpHeaders because there can be multiple headers that are of an ID nature. App Config has already asked us for multiple id-related headers: "x-ms-client-request-id" and "x-ms-correlation-request-id". Thus `HttpHeaders` represents the multiple headers a client can set.
/**
 * Creates a {@link RequestIdPolicy} with the provided {@link Supplier}, used to dynamically
 * generate request-id headers for each {@link HttpRequest}. The supplier should produce a
 * unique value on every invocation (e.g. 'x-ms-client-request-id', 'x-ms-correlation-request-id').
 *
 * @param requestIdSupplier Supplier of the request-id headers to attach to each request.
 * @throws NullPointerException If {@code requestIdSupplier} is {@code null}.
 */
public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) {
    // Fail fast on construction rather than silently skipping request-id headers later.
    this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null");
}
this.requestIdSupplier = requestIdSupplier;
public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
Instead of setting this to `null` here could we just insert a default supplier which replicates current functionality? That way we don't need to do the initial `null` check when applying the policy.
public RequestIdPolicy() { requestIdSupplier = null; }
requestIdSupplier = null;
public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
Do we want this to be nullable?
public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; }
this.requestIdSupplier = requestIdSupplier;
public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
This is a general question for all policies which mutate the request: should this just insert the header value? @JonathanGiles @srnagar
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
if (requestIdHeaderValue == null) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
Could we use a `for` loop, as `HttpHeaders` implements `Iterable<HttpHeader>`?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
httpHeaders.stream().forEach(httpHeader -> {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
Continuing from a previous comment: if the supplier is made non-nullable and we add a default header in the base constructor (and possibly default to it if null is passed into the overloaded constructor), should we do nothing if the supplier returns nothing? `x-ms-client-request-id` is optional for many services, so this would be allowed, but it would hurt tracking issues.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
Based on the above suggestion, I am making it non-nullable.
public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; }
this.requestIdSupplier = requestIdSupplier;
public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); } }
You answered it, I think: in order to track requests, we do need a request id. And if the client does not provide one, or gives us an empty value, we should provide it.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (Objects.nonNull(requestIdSupplier)) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { httpHeaders.stream().forEach(httpHeader -> { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(httpHeader.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } }); return next.process(); } } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = null; } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. {@code null} is * valid value. It is suggested that this {@link Supplier} should provide unique value every time * it is called. Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = requestIdSupplier; } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
```suggestion for (HttpHeader header : httpHeaders) { ```
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header:httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
for (HttpHeader header:httpHeaders) {
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
Wouldn't this allow users to provide a random set of HTTP headers and not just the request id headers? For example, the supplier can return ```java new HttpHeaders().put("foo", "bar").put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); ```
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
HttpHeaders httpHeaders = requestIdSupplier.get();
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
Yes. But unless we keep a set of valid request-id header names, we cannot validate them. And whenever a new request-id header is introduced, we would need to update that set and release a new version.
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
HttpHeaders httpHeaders = requestIdSupplier.get();
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { HttpHeaders httpHeaders = requestIdSupplier.get(); if (Objects.nonNull(httpHeaders) && httpHeaders.getSize() > 0) { for (HttpHeader header : httpHeaders) { String requestIdHeaderValue = context.getHttpRequest().getHeaders().getValue(header.getName()); if (requestIdHeaderValue == null) { context.getHttpRequest().getHeaders().put(header.getName(), header.getValue()); } } return next.process(); } String requestId = context.getHttpRequest().getHeaders().getValue(REQUEST_ID_HEADER); if (requestId == null) { context.getHttpRequest().getHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } return next.process(); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final Supplier<HttpHeaders> requestIdSupplier; /** * Creates default {@link RequestIdPolicy}. */ public RequestIdPolicy() { requestIdSupplier = () -> new HttpHeaders().put(REQUEST_ID_HEADER, UUID.randomUUID().toString()); } /** * Creates {@link RequestIdPolicy} with provided {@link Supplier} to dynamically generate request id for each * {@link HttpRequest}. * * @param requestIdSupplier to dynamically generate to request id for each {@link HttpRequest}. It is suggested * that this {@link Supplier} provides unique value every time it is called. * Example of these headers are 'x-ms-client-request-id', 'x-ms-correlation-request-id'. * * @throws NullPointerException when {@code requestIdSupplier} is {@code null}. */ public RequestIdPolicy(Supplier<HttpHeaders> requestIdSupplier) { this.requestIdSupplier = Objects.requireNonNull(requestIdSupplier, "'requestIdSupplier' must not be null"); } @Override }
OperatingSystemMXBean from java.lang.management does not provide precise CPU usage as a percentage, which is why com.sun.management is used instead. java.lang.management offers https://docs.oracle.com/javase/7/docs/api/java/lang/management/OperatingSystemMXBean.html#getSystemLoadAverage(), but after running it on VMs we were unable to correlate its value properly with actual CPU usage.
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String used_Memory = totalMemory - freeMemory + " KB"; String available_Memory = (maxMemory - (totalMemory - freeMemory)) + " KB"; OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); String processCpuLoad = Double.toString(mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + used_Memory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + available_Memory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String usedMemory = totalMemory - freeMemory + " KB"; String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB"; String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = 
request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { 
String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = 
request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new 
AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
This is an Oracle-specific implementation. There are other JVMs too: IBM, Azul, OpenJDK, etc. If the JVM is not an Oracle-based implementation, is com.sun.management available?
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String used_Memory = totalMemory - freeMemory + " KB"; String available_Memory = (maxMemory - (totalMemory - freeMemory)) + " KB"; OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); String processCpuLoad = Double.toString(mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + used_Memory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + available_Memory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String usedMemory = totalMemory - freeMemory + " KB"; String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB"; String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = 
request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { 
String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = 
request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new 
AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
see this comment from stackoverflow around OperatingSystemMXBean > Thanks for this answer but i have observed that running this code increases CPU utilization by 30-40%. This means that we get increased CPU Utilization whenever we run this code from: https://stackoverflow.com/questions/19781087/using-operatingsystemmxbean-to-get-cpu-usage
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String usedMemory = totalMemory - freeMemory + " KB"; String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB"; OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean(); String processCpuLoad = Double.toString(mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad());
private void printSystemInformation(StringBuilder stringBuilder) { try { long totalMemory = Runtime.getRuntime().totalMemory() / 1024; long freeMemory = Runtime.getRuntime().freeMemory() / 1024; long maxMemory = Runtime.getRuntime().maxMemory() / 1024; String usedMemory = totalMemory - freeMemory + " KB"; String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB"; String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad()); String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad()); stringBuilder.append("System State Information -------").append(System.lineSeparator()); stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator()); stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator()); stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator()); stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator()); } catch (Exception e) { } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = 
request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { 
String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = 
request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new 
AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
OpenJDK exposes com.sun.management and returns meaningful numbers. I will also try another JDK, e.g. IBM's, to confirm.
/**
 * Appends a best-effort snapshot of JVM memory usage and CPU load to the
 * diagnostics output.
 *
 * <p>Any failure (for example a JVM whose OperatingSystemMXBean is not the
 * com.sun.management variant) is deliberately swallowed so that gathering
 * diagnostics can never break the request path.
 *
 * @param stringBuilder the diagnostics builder to append to; never null
 */
private void printSystemInformation(StringBuilder stringBuilder) {
    try {
        // Runtime reports bytes; convert to KB for readability.
        long totalMemory = Runtime.getRuntime().totalMemory() / 1024;
        long freeMemory = Runtime.getRuntime().freeMemory() / 1024;
        long maxMemory = Runtime.getRuntime().maxMemory() / 1024;
        String usedMemory = totalMemory - freeMemory + " KB";
        String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";

        // The com.sun.management extension of the MXBean exposes CPU load;
        // the cast is what can throw on exotic JVMs (handled by the catch below).
        OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
            ManagementFactory.getOperatingSystemMXBean();
        String processCpuLoad = Double.toString(mbean.getProcessCpuLoad());
        String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad());

        stringBuilder.append("System State Information -------").append(System.lineSeparator());
        stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator());
        stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator());
        stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator());
        stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator());
    } catch (Exception ignored) {
        // Best-effort diagnostics: never let system introspection fail the request.
    }
}
OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
/**
 * Appends a best-effort snapshot of JVM memory usage and CPU load to the
 * diagnostics output, reading CPU figures from the MXBean cached on the class.
 *
 * <p>Any failure is deliberately swallowed so that gathering diagnostics can
 * never break the request path.
 *
 * @param stringBuilder the diagnostics builder to append to; never null
 */
private void printSystemInformation(StringBuilder stringBuilder) {
    try {
        // Runtime reports bytes; convert to KB for readability.
        long totalMemory = Runtime.getRuntime().totalMemory() / 1024;
        long freeMemory = Runtime.getRuntime().freeMemory() / 1024;
        long maxMemory = Runtime.getRuntime().maxMemory() / 1024;
        String usedMemory = totalMemory - freeMemory + " KB";
        String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";

        // CPU load comes from the shared com.sun.management MXBean held by the class.
        String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad());
        String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad());

        stringBuilder.append("System State Information -------").append(System.lineSeparator());
        stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator());
        stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator());
        stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator());
        stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator());
    } catch (Exception ignored) {
        // Best-effort diagnostics: never let system introspection fail the request.
    }
}
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = 
request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { 
String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = 
request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new 
AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
Tested on AdoptOpenJDK, IBM SDK, and Zulu; they all support com.sun.management.
/**
 * Appends a best-effort snapshot of JVM memory usage and CPU load to the
 * diagnostics output.
 *
 * <p>Any failure (for example a JVM whose OperatingSystemMXBean is not the
 * com.sun.management variant) is deliberately swallowed so that gathering
 * diagnostics can never break the request path.
 *
 * @param stringBuilder the diagnostics builder to append to; never null
 */
private void printSystemInformation(StringBuilder stringBuilder) {
    try {
        // Runtime reports bytes; convert to KB for readability.
        long totalMemory = Runtime.getRuntime().totalMemory() / 1024;
        long freeMemory = Runtime.getRuntime().freeMemory() / 1024;
        long maxMemory = Runtime.getRuntime().maxMemory() / 1024;
        String usedMemory = totalMemory - freeMemory + " KB";
        String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";

        // The com.sun.management extension of the MXBean exposes CPU load;
        // the cast is what can throw on exotic JVMs (handled by the catch below).
        OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
            ManagementFactory.getOperatingSystemMXBean();
        String processCpuLoad = Double.toString(mbean.getProcessCpuLoad());
        String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad());

        stringBuilder.append("System State Information -------").append(System.lineSeparator());
        stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator());
        stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator());
        stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator());
        stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator());
    } catch (Exception ignored) {
        // Best-effort diagnostics: never let system introspection fail the request.
    }
}
OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
/**
 * Appends a best-effort snapshot of JVM memory usage and CPU load to the
 * diagnostics output, reading CPU figures from the MXBean cached on the class.
 *
 * <p>Any failure is deliberately swallowed so that gathering diagnostics can
 * never break the request path.
 *
 * @param stringBuilder the diagnostics builder to append to; never null
 */
private void printSystemInformation(StringBuilder stringBuilder) {
    try {
        // Runtime reports bytes; convert to KB for readability.
        long totalMemory = Runtime.getRuntime().totalMemory() / 1024;
        long freeMemory = Runtime.getRuntime().freeMemory() / 1024;
        long maxMemory = Runtime.getRuntime().maxMemory() / 1024;
        String usedMemory = totalMemory - freeMemory + " KB";
        String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";

        // CPU load comes from the shared com.sun.management MXBean held by the class.
        String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad());
        String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad());

        stringBuilder.append("System State Information -------").append(System.lineSeparator());
        stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator());
        stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator());
        stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator());
        stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator());
    } catch (Exception ignored) {
        // Best-effort diagnostics: never let system introspection fail the request.
    }
}
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = 
request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { 
String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = 
request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new 
AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
Thanks that was helpful
/**
 * Appends a best-effort snapshot of JVM memory usage and CPU load to the
 * given builder. All memory figures are reported in kilobytes.
 *
 * <p>Obtains a fresh {@code com.sun.management.OperatingSystemMXBean} on
 * each call; the cast can fail on JVMs that do not provide that extension,
 * which is one reason failures are swallowed.
 *
 * @param stringBuilder destination for the formatted system-state lines
 */
private void printSystemInformation(StringBuilder stringBuilder) {
    try {
        long totalMemory = Runtime.getRuntime().totalMemory() / 1024;
        long freeMemory = Runtime.getRuntime().freeMemory() / 1024;
        long maxMemory = Runtime.getRuntime().maxMemory() / 1024;
        String usedMemory = totalMemory - freeMemory + " KB";
        String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";
        OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)
            ManagementFactory.getOperatingSystemMXBean();
        String processCpuLoad = Double.toString(mbean.getProcessCpuLoad());
        String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad());
        stringBuilder.append("System State Information -------").append(System.lineSeparator());
        stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator());
        stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator());
        stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator());
        stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator());
    } catch (Exception e) {
        // Best-effort diagnostics only: any failure is deliberately ignored
        // so metrics collection never breaks request processing.
    }
}
String systemCpuLoad = Double.toString(mbean.getSystemCpuLoad());
/**
 * Appends a best-effort snapshot of JVM memory usage and CPU load to the
 * given builder. All memory figures are reported in kilobytes.
 *
 * <p>Assumes {@code this.mbean} is an
 * {@code com.sun.management.OperatingSystemMXBean} exposing
 * {@code getProcessCpuLoad()}/{@code getSystemCpuLoad()} — the field is
 * declared elsewhere in the enclosing class; confirm there.
 *
 * @param stringBuilder destination for the formatted system-state lines
 */
private void printSystemInformation(StringBuilder stringBuilder) {
    try {
        long totalMemory = Runtime.getRuntime().totalMemory() / 1024;
        long freeMemory = Runtime.getRuntime().freeMemory() / 1024;
        long maxMemory = Runtime.getRuntime().maxMemory() / 1024;
        String usedMemory = totalMemory - freeMemory + " KB";
        String availableMemory = (maxMemory - (totalMemory - freeMemory)) + " KB";
        String processCpuLoad = Double.toString(this.mbean.getProcessCpuLoad());
        String systemCpuLoad = Double.toString(this.mbean.getSystemCpuLoad());
        stringBuilder.append("System State Information -------").append(System.lineSeparator());
        stringBuilder.append("Used Memory : " + usedMemory).append(System.lineSeparator());
        stringBuilder.append("Available Memory : " + availableMemory).append(System.lineSeparator());
        stringBuilder.append("CPU Process Load : " + processCpuLoad).append(System.lineSeparator());
        stringBuilder.append("CPU System Load : " + systemCpuLoad).append(System.lineSeparator());
    } catch (Exception e) {
        // Best-effort diagnostics only: any failure is deliberately ignored
        // so metrics collection never breaks request processing.
    }
}
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = 
request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); ISessionToken sessionToken = null; String headerValue; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN)) != null) { sessionToken = SessionTokenHelper.parse(headerValue); } double requestCharge = 0; if ((headerValue = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE)) != null) { requestCharge = Double.parseDouble(headerValue); } this.gatewayStatistic.sessionToken = sessionToken; this.gatewayStatistic.requestCharge = requestCharge; } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { 
String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private ISessionToken sessionToken; private double requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + (this.sessionToken != null ? this.sessionToken.convertToString() : null) + "Request Charge : " + requestCharge + '}'; } } }
class ClientSideRequestStatistics { private final static int MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING = 10; private final static DateTimeFormatter responseTimeFormatter = DateTimeFormatter.ofPattern("dd MMM yyyy HH:mm:ss.SSS").withLocale(Locale.US); private final static OperatingSystemMXBean mbean = (com.sun.management.OperatingSystemMXBean)ManagementFactory.getOperatingSystemMXBean(); private ZonedDateTime requestStartTime; private ZonedDateTime requestEndTime; private ConnectionMode connectionMode; private List<StoreResponseStatistics> responseStatisticsList; private List<StoreResponseStatistics> supplementalResponseStatisticsList; private Map<String, AddressResolutionStatistics> addressResolutionStatistics; private GatewayStatistic gatewayStatistic; private List<URI> contactedReplicas; private Set<URI> failedReplicas; private Set<URI> regionsContacted; ClientSideRequestStatistics() { this.requestStartTime = ZonedDateTime.now(ZoneOffset.UTC); this.requestEndTime = ZonedDateTime.now(ZoneOffset.UTC); this.responseStatisticsList = new ArrayList<>(); this.supplementalResponseStatisticsList = new ArrayList<>(); this.addressResolutionStatistics = new HashMap<>(); this.contactedReplicas = new ArrayList<>(); this.failedReplicas = new HashSet<>(); this.regionsContacted = new HashSet<>(); this.connectionMode = ConnectionMode.DIRECT; } Duration getRequestLatency() { return Duration.between(requestStartTime, requestEndTime); } private boolean isCPUOverloaded() { return false; } void recordResponse(RxDocumentServiceRequest request, StoreResult storeResult) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.DIRECT; StoreResponseStatistics storeResponseStatistics = new StoreResponseStatistics(); storeResponseStatistics.requestResponseTime = responseTime; storeResponseStatistics.storeResult = storeResult; storeResponseStatistics.requestOperationType = request.getOperationType(); storeResponseStatistics.requestResourceType = 
request.getResourceType(); URI locationEndPoint = null; if (request.requestContext.locationEndpointToRoute != null) { try { locationEndPoint = request.requestContext.locationEndpointToRoute.toURI(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } if (locationEndPoint != null) { this.regionsContacted.add(locationEndPoint); } if (storeResponseStatistics.requestOperationType == OperationType.Head || storeResponseStatistics.requestOperationType == OperationType.HeadFeed) { this.supplementalResponseStatisticsList.add(storeResponseStatistics); } else { this.responseStatisticsList.add(storeResponseStatistics); } } } void recordGatewayResponse(RxDocumentServiceRequest rxDocumentServiceRequest, StoreResponse storeResponse, CosmosClientException exception) { ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); connectionMode = ConnectionMode.GATEWAY; synchronized (this) { if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } this.gatewayStatistic = new GatewayStatistic(); this.gatewayStatistic.operationType = rxDocumentServiceRequest.getOperationType(); if (storeResponse != null) { this.gatewayStatistic.statusCode = storeResponse.getStatus(); this.gatewayStatistic.subStatusCode = DirectBridgeInternal.getSubStatusCode(storeResponse); this.gatewayStatistic.sessionToken = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.SESSION_TOKEN); this.gatewayStatistic.requestCharge = storeResponse.getHeaderValue(HttpConstants.HttpHeaders.REQUEST_CHARGE); } else if(exception != null){ this.gatewayStatistic.statusCode = exception.getStatusCode(); this.gatewayStatistic.subStatusCode = exception.getSubStatusCode(); } } } String recordAddressResolutionStart(URI targetEndpoint) { String identifier = Utils.randomUUID().toString(); AddressResolutionStatistics resolutionStatistics = new 
AddressResolutionStatistics(); resolutionStatistics.startTime = ZonedDateTime.now(ZoneOffset.UTC); resolutionStatistics.endTime = ZonedDateTime.of(LocalDateTime.MAX, ZoneOffset.UTC); resolutionStatistics.targetEndpoint = targetEndpoint == null ? "<NULL>" : targetEndpoint.toString(); synchronized (this) { this.addressResolutionStatistics.put(identifier, resolutionStatistics); } return identifier; } void recordAddressResolutionEnd(String identifier) { if (StringUtils.isEmpty(identifier)) { return; } ZonedDateTime responseTime = ZonedDateTime.now(ZoneOffset.UTC); synchronized (this) { if (!this.addressResolutionStatistics.containsKey(identifier)) { throw new IllegalArgumentException("Identifier " + identifier + " does not exist. Please call start before calling end"); } if (responseTime.isAfter(this.requestEndTime)) { this.requestEndTime = responseTime; } AddressResolutionStatistics resolutionStatistics = this.addressResolutionStatistics.get(identifier); resolutionStatistics.endTime = responseTime; } } @Override public String toString() { StringBuilder stringBuilder = new StringBuilder(); synchronized (this) { stringBuilder.append("RequestStartTime: ") .append("\"").append(this.requestStartTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("RequestEndTime: ") .append("\"").append(this.requestEndTime.format(responseTimeFormatter)).append("\"") .append(", ") .append("Duration: ") .append(Duration.between(requestStartTime, requestEndTime).toMillis()) .append(" ms, ") .append("Connection Mode : " + this.connectionMode.toString() + ", ") .append("NUMBER of regions attempted: ") .append(this.regionsContacted.isEmpty() ? 
1 : this.regionsContacted.size()) .append(System.lineSeparator()); for (StoreResponseStatistics storeResponseStatistics : this.responseStatisticsList) { stringBuilder.append(storeResponseStatistics.toString()).append(System.lineSeparator()); } for (AddressResolutionStatistics value : this.addressResolutionStatistics.values()) { stringBuilder.append(value.toString()).append(System.lineSeparator()); } int supplementalResponseStatisticsListCount = this.supplementalResponseStatisticsList.size(); int initialIndex = Math.max(supplementalResponseStatisticsListCount - MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING, 0); if (initialIndex != 0) { stringBuilder.append(" -- Displaying only the last ") .append(MAX_SUPPLEMENTAL_REQUESTS_FOR_TO_STRING) .append(" head/headfeed requests. Total head/headfeed requests: ") .append(supplementalResponseStatisticsListCount); } for (int i = initialIndex; i < supplementalResponseStatisticsListCount; i++) { stringBuilder.append(this.supplementalResponseStatisticsList.get(i).toString()).append(System.lineSeparator()); } if (this.gatewayStatistic != null) { stringBuilder.append(this.gatewayStatistic).append(System.lineSeparator()); } printSystemInformation(stringBuilder); } String requestStatsString = stringBuilder.toString(); if (!requestStatsString.isEmpty()) { return System.lineSeparator() + requestStatsString; } return StringUtils.EMPTY; } List<URI> getContactedReplicas() { return contactedReplicas; } void setContactedReplicas(List<URI> contactedReplicas) { this.contactedReplicas = contactedReplicas; } Set<URI> getFailedReplicas() { return failedReplicas; } void setFailedReplicas(Set<URI> failedReplicas) { this.failedReplicas = failedReplicas; } Set<URI> getRegionsContacted() { return regionsContacted; } void setRegionsContacted(Set<URI> regionsContacted) { this.regionsContacted = regionsContacted; } private static String formatDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } return dateTime.format(responseTimeFormatter); 
} private class StoreResponseStatistics { private ZonedDateTime requestResponseTime; private StoreResult storeResult; private ResourceType requestResourceType; private OperationType requestOperationType; @Override public String toString() { return "StoreResponseStatistics{" + "requestResponseTime=\"" + formatDateTime(requestResponseTime) + "\"" + ", storeResult=" + storeResult + ", requestResourceType=" + requestResourceType + ", requestOperationType=" + requestOperationType + '}'; } } private class AddressResolutionStatistics { private ZonedDateTime startTime; private ZonedDateTime endTime; private String targetEndpoint; AddressResolutionStatistics() { } @Override public String toString() { return "AddressResolutionStatistics{" + "startTime=\"" + formatDateTime(startTime) + "\"" + ", endTime=\"" + formatDateTime(endTime) + "\"" + ", targetEndpoint='" + targetEndpoint + '\'' + '}'; } } private class GatewayStatistic { private OperationType operationType; private int statusCode; private int subStatusCode; private String sessionToken; private String requestCharge; public String toString() { return "Gateway statistics{ " + "Operation Type : " + this.operationType + "Status Code : " + this.statusCode + "Substatus Code : " + this.statusCode + "Session Token : " + this.sessionToken + "Request Charge : " + requestCharge + '}'; } } }
as long as you are using a shared collection from TestSuiteBase, you don't need to delete documents, the test framework will do that when needed. please remove deleteItem here and elsewhere
public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); }
deleteItem(createResponse);
public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), 
null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); System.out.println(diagnostics); } finally { deleteItem(createResponse); } } @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); 
assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } finally { deleteItem(createResponse); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } private void deleteItem(CosmosItemResponse cosmosItemResponse) throws CosmosClientException { CosmosItemRequestOptions cosmosItemRequestOptions = new 
CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("test")); cosmosItemResponse.getItem().delete(cosmosItemRequestOptions); } }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), 
null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } } @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); 
assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } }
as long as you are using a shared collection from TestSuiteBase, you don't need to delete documents, the test framework will do that when needed. please remove deleteItem here and elsewhere
public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); System.out.println(diagnostics); } finally { deleteItem(createResponse); } }
deleteItem(createResponse);
public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); 
assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = 
directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } finally { deleteItem(createResponse); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } private void deleteItem(CosmosItemResponse cosmosItemResponse) throws CosmosClientException { CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("test")); cosmosItemResponse.getItem().delete(cosmosItemRequestOptions); } }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); 
assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = 
getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } }
done
public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); }
deleteItem(createResponse);
public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), 
null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); System.out.println(diagnostics); } finally { deleteItem(createResponse); } } @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); 
assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } finally { deleteItem(createResponse); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } private void deleteItem(CosmosItemResponse cosmosItemResponse) throws CosmosClientException { CosmosItemRequestOptions cosmosItemRequestOptions = new 
CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("test")); cosmosItemResponse.getItem().delete(cosmosItemRequestOptions); } }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), 
null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } } @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); 
assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } }
done
public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); System.out.println(diagnostics); } finally { deleteItem(createResponse); } }
deleteItem(createResponse);
public void gatewayDiagnosticsOnException() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = this.container.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Status Code : 404"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Read); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); 
assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); deleteItem(createResponse); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = 
directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } finally { deleteItem(createResponse); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } private void deleteItem(CosmosItemResponse cosmosItemResponse) throws CosmosClientException { CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("test")); cosmosItemResponse.getItem().delete(cosmosItemRequestOptions); } }
class CosmosResponseDiagnosticsTest extends TestSuiteBase { private CosmosClient gatewayClient; private CosmosClient directClient; private CosmosContainer container; private CosmosAsyncContainer cosmosAsyncContainer; @BeforeClass(groups = {"simple"}, timeOut = SETUP_TIMEOUT) public void beforeClass() throws Exception { assertThat(this.gatewayClient).isNull(); CosmosClientBuilder cosmosClientBuilder = new CosmosClientBuilder() .setEndpoint(TestConfigurations.HOST) .setKey(TestConfigurations.MASTER_KEY); ConnectionPolicy connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.GATEWAY); gatewayClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); connectionPolicy = new ConnectionPolicy(); connectionPolicy.setConnectionMode(ConnectionMode.DIRECT); directClient = cosmosClientBuilder.setConnectionPolicy(connectionPolicy).buildClient(); cosmosAsyncContainer = getSharedMultiPartitionCosmosContainer(this.gatewayClient.asyncClient()); container = gatewayClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); } @AfterClass(groups = {"simple"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() throws CosmosClientException { assertThat(this.gatewayClient).isNotNull(); this.gatewayClient.close(); if (this.directClient != null) { this.directClient.close(); } } @Test(groups = {"simple"}) public void gatewayDiagnostics() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.GATEWAY); assertThat(diagnostics).contains("Gateway statistics"); assertThat(diagnostics).contains("Operation Type : " + OperationType.Create); 
assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) @Test(groups = {"simple"}) public void systemDiagnosticsForSystemStateInformation() throws CosmosClientException { CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = this.container.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("System State Information ------"); assertThat(diagnostics).contains("Used Memory :"); assertThat(diagnostics).contains("Available Memory :"); assertThat(diagnostics).contains("CPU Process Load :"); assertThat(diagnostics).contains("CPU System Load :"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnostics() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = getCosmosItemProperties(); CosmosItemResponse createResponse = cosmosContainer.createItem(cosmosItemProperties); String diagnostics = createResponse.getCosmosResponseDiagnostics().toString(); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(diagnostics).contains("StoreResponseStatistics"); assertThat(diagnostics).doesNotContain("Gateway request URI :"); assertThat(diagnostics).contains("AddressResolutionStatistics"); assertThat(createResponse.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } @Test(groups = {"simple"}) public void directDiagnosticsOnException() throws CosmosClientException { CosmosContainer cosmosContainer = directClient.getDatabase(cosmosAsyncContainer.getDatabase().getId()).getContainer(cosmosAsyncContainer.getId()); CosmosItemProperties cosmosItemProperties = 
getCosmosItemProperties(); CosmosItemResponse createResponse = null; try { createResponse = this.container.createItem(cosmosItemProperties); CosmosItemRequestOptions cosmosItemRequestOptions = new CosmosItemRequestOptions(); cosmosItemRequestOptions.setPartitionKey(new PartitionKey("wrongPartitionKey")); CosmosItemResponse readResponse = cosmosContainer.getItem(createResponse.getItem().getId(), null).read(cosmosItemRequestOptions); fail("request should fail as partition key is wrong"); } catch (CosmosClientException exception) { String diagnostics = exception.getCosmosResponseDiagnostics().toString(); assertThat(exception.getStatusCode()).isEqualTo(HttpConstants.StatusCodes.NOTFOUND); assertThat(diagnostics).contains("Connection Mode : " + ConnectionMode.DIRECT); assertThat(exception.getCosmosResponseDiagnostics().getRequestLatency()).isNotNull(); } } private CosmosItemProperties getCosmosItemProperties() { CosmosItemProperties cosmosItemProperties = new CosmosItemProperties(); cosmosItemProperties.setId(UUID.randomUUID().toString()); cosmosItemProperties.set("mypk", "test"); return cosmosItemProperties; } }
We should also consider what happens to the user agent strings that were generated with the released version of the API and how this change will impact the user data analytics. Do we have other languages also making these changes to the user agent string?
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String overrideUserAgent = (String) context.getData(OVERRIDE_USER_AGENT_CONTEXT_KEY).orElse(null); String appendUserAgent = (String) context.getData(APPEND_USER_AGENT_CONTEXT_KEY).orElse(null); String userAgentValue; if (!CoreUtils.isNullOrEmpty(overrideUserAgent)) { userAgentValue = overrideUserAgent; } else if (!CoreUtils.isNullOrEmpty(appendUserAgent)) { userAgentValue = userAgent + " " + appendUserAgent; } else { userAgentValue = userAgent; } context.getHttpRequest().getHeaders().put(USER_AGENT, userAgentValue); return next.process(); }
String overrideUserAgent = (String) context.getData(OVERRIDE_USER_AGENT_CONTEXT_KEY).orElse(null);
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String overrideUserAgent = (String) context.getData(OVERRIDE_USER_AGENT_CONTEXT_KEY).orElse(null); String appendUserAgent = (String) context.getData(APPEND_USER_AGENT_CONTEXT_KEY).orElse(null); String userAgentValue; if (!CoreUtils.isNullOrEmpty(overrideUserAgent)) { userAgentValue = overrideUserAgent; } else if (!CoreUtils.isNullOrEmpty(appendUserAgent)) { userAgentValue = userAgent + " " + appendUserAgent; } else { userAgentValue = userAgent; } context.getHttpRequest().getHeaders().put(USER_AGENT, userAgentValue); return next.process(); }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String USER_AGENT = "User-Agent"; private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; /** * Key for {@link Context} to add a value which will override the User-Agent supplied in this policy in an ad-hoc * manner. */ public static final String OVERRIDE_USER_AGENT_CONTEXT_KEY = "Override-User-Agent"; /** * Key for {@link Context} to add a value which will be appended to the User-Agent supplied in this policy in an * ad-hoc manner. */ public static final String APPEND_USER_AGENT_CONTEXT_KEY = "Append-User-Agent"; /* * The base User-Agent header format is azsdk-java-<client_lib>/<sdk_version>. Additional information such as the * application ID will be prepended and platform telemetry will be appended, a fully configured User-Agent header * format is <application_id> azsdk-java-<client_lib>/<sdk_version> <platform_info>. */ private static final String DEFAULT_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @param applicationId User specified application Id. * @param sdkName Name of the client library. 
* @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ public UserAgentPolicy(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { this.userAgent = buildUserAgent(applicationId, sdkName, sdkVersion, configuration); } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @deprecated This method has been deprecated in favor of {@link * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param version {@link ServiceVersion} of the service to be used when making requests. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ @Deprecated public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration, ServiceVersion version) { this.userAgent = buildUserAgent(null, sdkName, sdkVersion, configuration); } /** * Updates the "User-Agent" header with the value supplied in the policy. * * <p>The {@code context} will be checked for {@code Override-User-Agent} and {@code Append-User-Agent}. * {@code Override-User-Agent} will take precedence over the value supplied in the policy, * {@code Append-User-Agent} will be appended to the value supplied in the policy.</p> * * @param context request context * @param next The next policy to invoke. * @return A publisher that initiates the request upon subscription and emits a response on completion. */ @Override /* * Builds the User-Agent header, at minimum this will create a User-Agent header with the DEFAULT_USER_AGENT_FORMAT. 
*/ private static String buildUserAgent(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { StringBuilder userAgentBuilder = new StringBuilder(); if (applicationId != null) { userAgentBuilder.append(applicationId).append(" "); } userAgentBuilder.append(String.format(DEFAULT_USER_AGENT_FORMAT, sdkName, sdkVersion)); if (!telemetryDisabled(configuration)) { userAgentBuilder.append(" ") .append("(") .append(getPlatformInfo()) .append(")"); } return userAgentBuilder.toString(); } /* * Retrieves the platform information telemetry that is appended to the User-Agent header. */ private static String getPlatformInfo() { String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } /* * Retrieves the telemetry disabled flag from the passed configuration if it isn't {@code null} otherwise it will * check in the global configuration. */ private static boolean telemetryDisabled(Configuration configuration) { return (configuration == null) ? Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false) : configuration.get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false); } }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String USER_AGENT = "User-Agent"; private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; /** * Key for {@link Context} to add a value which will override the User-Agent supplied in this policy in an ad-hoc * manner. */ public static final String OVERRIDE_USER_AGENT_CONTEXT_KEY = "Override-User-Agent"; /** * Key for {@link Context} to add a value which will be appended to the User-Agent supplied in this policy in an * ad-hoc manner. */ public static final String APPEND_USER_AGENT_CONTEXT_KEY = "Append-User-Agent"; /* * The base User-Agent header format is azsdk-java-<client_lib>/<sdk_version>. Additional information such as the * application ID will be prepended and platform telemetry will be appended, a fully configured User-Agent header * format is <application_id> azsdk-java-<client_lib>/<sdk_version> <platform_info>. */ private static final String DEFAULT_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @param applicationId User specified application Id. * @param sdkName Name of the client library. 
* @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ public UserAgentPolicy(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { this.userAgent = buildUserAgent(applicationId, sdkName, sdkVersion, configuration); } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @deprecated This method has been deprecated in favor of {@link * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param version {@link ServiceVersion} of the service to be used when making requests. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ @Deprecated public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration, ServiceVersion version) { this.userAgent = buildUserAgent(null, sdkName, sdkVersion, configuration); } /** * Updates the "User-Agent" header with the value supplied in the policy. * * <p>The {@code context} will be checked for {@code Override-User-Agent} and {@code Append-User-Agent}. * {@code Override-User-Agent} will take precedence over the value supplied in the policy, * {@code Append-User-Agent} will be appended to the value supplied in the policy.</p> * * @param context request context * @param next The next policy to invoke. * @return A publisher that initiates the request upon subscription and emits a response on completion. */ @Override /* * Builds the User-Agent header, at minimum this will create a User-Agent header with the DEFAULT_USER_AGENT_FORMAT. 
*/ private static String buildUserAgent(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { StringBuilder userAgentBuilder = new StringBuilder(); if (applicationId != null) { userAgentBuilder.append(applicationId).append(" "); } userAgentBuilder.append(String.format(DEFAULT_USER_AGENT_FORMAT, sdkName, sdkVersion)); if (!telemetryDisabled(configuration)) { userAgentBuilder.append(" ") .append("(") .append(getPlatformInfo()) .append(")"); } return userAgentBuilder.toString(); } /* * Retrieves the platform information telemetry that is appended to the User-Agent header. */ private static String getPlatformInfo() { String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } /* * Retrieves the telemetry disabled flag from the passed configuration if it isn't {@code null} otherwise it will * check in the global configuration. */ private static boolean telemetryDisabled(Configuration configuration) { return (configuration == null) ? Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false) : configuration.get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false); } }
I'll look into how the other languages handle this; if need be, we can revert this portion of the change.
/**
 * Updates the "User-Agent" header with the value supplied in the policy, honoring ad-hoc
 * {@code Override-User-Agent} and {@code Append-User-Agent} values found in the context.
 *
 * @param context The request context.
 * @param next The next policy to invoke.
 * @return A publisher that initiates the request upon subscription and emits a response on completion.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Context data is optional; missing keys yield null here.
    String overrideUserAgent = (String) context.getData(OVERRIDE_USER_AGENT_CONTEXT_KEY).orElse(null);
    String appendUserAgent = (String) context.getData(APPEND_USER_AGENT_CONTEXT_KEY).orElse(null);
    String userAgentValue;
    if (!CoreUtils.isNullOrEmpty(overrideUserAgent)) {
        // Override wins over both the configured value and any append value.
        userAgentValue = overrideUserAgent;
    } else if (!CoreUtils.isNullOrEmpty(appendUserAgent)) {
        userAgentValue = userAgent + " " + appendUserAgent;
    } else {
        userAgentValue = userAgent;
    }
    context.getHttpRequest().getHeaders().put(USER_AGENT, userAgentValue);
    return next.process();
}
String overrideUserAgent = (String) context.getData(OVERRIDE_USER_AGENT_CONTEXT_KEY).orElse(null);
/**
 * Applies the User-Agent header to the outgoing request, then continues the pipeline.
 *
 * @param context The request context, consulted for ad-hoc override/append values.
 * @param next The next policy to invoke.
 * @return A publisher that initiates the request upon subscription and emits a response on completion.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    // Optional per-request adjustments pulled from the Context; null when not set.
    String overrideUserAgent = (String) context.getData(OVERRIDE_USER_AGENT_CONTEXT_KEY).orElse(null);
    String appendUserAgent = (String) context.getData(APPEND_USER_AGENT_CONTEXT_KEY).orElse(null);
    String userAgentValue;
    if (!CoreUtils.isNullOrEmpty(overrideUserAgent)) {
        // Full replacement of the configured user agent.
        userAgentValue = overrideUserAgent;
    } else if (!CoreUtils.isNullOrEmpty(appendUserAgent)) {
        // Configured value plus a space-separated suffix.
        userAgentValue = userAgent + " " + appendUserAgent;
    } else {
        userAgentValue = userAgent;
    }
    context.getHttpRequest().getHeaders().put(USER_AGENT, userAgentValue);
    return next.process();
}
class UserAgentPolicy implements HttpPipelinePolicy { private static final String USER_AGENT = "User-Agent"; private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; /** * Key for {@link Context} to add a value which will override the User-Agent supplied in this policy in an ad-hoc * manner. */ public static final String OVERRIDE_USER_AGENT_CONTEXT_KEY = "Override-User-Agent"; /** * Key for {@link Context} to add a value which will be appended to the User-Agent supplied in this policy in an * ad-hoc manner. */ public static final String APPEND_USER_AGENT_CONTEXT_KEY = "Append-User-Agent"; /* * The base User-Agent header format is azsdk-java-<client_lib>/<sdk_version>. Additional information such as the * application ID will be prepended and platform telemetry will be appended, a fully configured User-Agent header * format is <application_id> azsdk-java-<client_lib>/<sdk_version> <platform_info>. */ private static final String DEFAULT_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @param applicationId User specified application Id. * @param sdkName Name of the client library. 
* @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ public UserAgentPolicy(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { this.userAgent = buildUserAgent(applicationId, sdkName, sdkVersion, configuration); } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @deprecated This method has been deprecated in favor of {@link * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param version {@link ServiceVersion} of the service to be used when making requests. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ @Deprecated public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration, ServiceVersion version) { this.userAgent = buildUserAgent(null, sdkName, sdkVersion, configuration); } /** * Updates the "User-Agent" header with the value supplied in the policy. * * <p>The {@code context} will be checked for {@code Override-User-Agent} and {@code Append-User-Agent}. * {@code Override-User-Agent} will take precedence over the value supplied in the policy, * {@code Append-User-Agent} will be appended to the value supplied in the policy.</p> * * @param context request context * @param next The next policy to invoke. * @return A publisher that initiates the request upon subscription and emits a response on completion. */ @Override /* * Builds the User-Agent header, at minimum this will create a User-Agent header with the DEFAULT_USER_AGENT_FORMAT. 
*/ private static String buildUserAgent(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { StringBuilder userAgentBuilder = new StringBuilder(); if (applicationId != null) { userAgentBuilder.append(applicationId).append(" "); } userAgentBuilder.append(String.format(DEFAULT_USER_AGENT_FORMAT, sdkName, sdkVersion)); if (!telemetryDisabled(configuration)) { userAgentBuilder.append(" ") .append("(") .append(getPlatformInfo()) .append(")"); } return userAgentBuilder.toString(); } /* * Retrieves the platform information telemetry that is appended to the User-Agent header. */ private static String getPlatformInfo() { String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } /* * Retrieves the telemetry disabled flag from the passed configuration if it isn't {@code null} otherwise it will * check in the global configuration. */ private static boolean telemetryDisabled(Configuration configuration) { return (configuration == null) ? Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false) : configuration.get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false); } }
class UserAgentPolicy implements HttpPipelinePolicy { private static final String USER_AGENT = "User-Agent"; private static final String DEFAULT_USER_AGENT_HEADER = "azsdk-java"; /** * Key for {@link Context} to add a value which will override the User-Agent supplied in this policy in an ad-hoc * manner. */ public static final String OVERRIDE_USER_AGENT_CONTEXT_KEY = "Override-User-Agent"; /** * Key for {@link Context} to add a value which will be appended to the User-Agent supplied in this policy in an * ad-hoc manner. */ public static final String APPEND_USER_AGENT_CONTEXT_KEY = "Append-User-Agent"; /* * The base User-Agent header format is azsdk-java-<client_lib>/<sdk_version>. Additional information such as the * application ID will be prepended and platform telemetry will be appended, a fully configured User-Agent header * format is <application_id> azsdk-java-<client_lib>/<sdk_version> <platform_info>. */ private static final String DEFAULT_USER_AGENT_FORMAT = DEFAULT_USER_AGENT_HEADER + "-%s/%s"; private static final String PLATFORM_INFO_FORMAT = "%s; %s %s"; private final String userAgent; /** * Creates a {@link UserAgentPolicy} with a default user agent string. */ public UserAgentPolicy() { this(null); } /** * Creates a UserAgentPolicy with {@code userAgent} as the header value. If {@code userAgent} is {@code null}, then * the default user agent value is used. * * @param userAgent The user agent string to add to request headers. */ public UserAgentPolicy(String userAgent) { if (userAgent != null) { this.userAgent = userAgent; } else { this.userAgent = DEFAULT_USER_AGENT_HEADER; } } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @param applicationId User specified application Id. * @param sdkName Name of the client library. 
* @param sdkVersion Version of the client library. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ public UserAgentPolicy(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { this.userAgent = buildUserAgent(applicationId, sdkName, sdkVersion, configuration); } /** * Creates a UserAgentPolicy with the {@code sdkName} and {@code sdkVersion} in the User-Agent header value. * * <p>If the passed configuration contains true for AZURE_TELEMETRY_DISABLED the platform information won't be * included in the user agent.</p> * * @deprecated This method has been deprecated in favor of {@link * * @param sdkName Name of the client library. * @param sdkVersion Version of the client library. * @param version {@link ServiceVersion} of the service to be used when making requests. * @param configuration Configuration store that will be checked for {@link * Configuration * Configuration */ @Deprecated public UserAgentPolicy(String sdkName, String sdkVersion, Configuration configuration, ServiceVersion version) { this.userAgent = buildUserAgent(null, sdkName, sdkVersion, configuration); } /** * Updates the "User-Agent" header with the value supplied in the policy. * * <p>The {@code context} will be checked for {@code Override-User-Agent} and {@code Append-User-Agent}. * {@code Override-User-Agent} will take precedence over the value supplied in the policy, * {@code Append-User-Agent} will be appended to the value supplied in the policy.</p> * * @param context request context * @param next The next policy to invoke. * @return A publisher that initiates the request upon subscription and emits a response on completion. */ @Override /* * Builds the User-Agent header, at minimum this will create a User-Agent header with the DEFAULT_USER_AGENT_FORMAT. 
*/ private static String buildUserAgent(String applicationId, String sdkName, String sdkVersion, Configuration configuration) { StringBuilder userAgentBuilder = new StringBuilder(); if (applicationId != null) { userAgentBuilder.append(applicationId).append(" "); } userAgentBuilder.append(String.format(DEFAULT_USER_AGENT_FORMAT, sdkName, sdkVersion)); if (!telemetryDisabled(configuration)) { userAgentBuilder.append(" ") .append("(") .append(getPlatformInfo()) .append(")"); } return userAgentBuilder.toString(); } /* * Retrieves the platform information telemetry that is appended to the User-Agent header. */ private static String getPlatformInfo() { String javaVersion = Configuration.getGlobalConfiguration().get("java.version"); String osName = Configuration.getGlobalConfiguration().get("os.name"); String osVersion = Configuration.getGlobalConfiguration().get("os.version"); return String.format(PLATFORM_INFO_FORMAT, javaVersion, osName, osVersion); } /* * Retrieves the telemetry disabled flag from the passed configuration if it isn't {@code null} otherwise it will * check in the global configuration. */ private static boolean telemetryDisabled(Configuration configuration) { return (configuration == null) ? Configuration.getGlobalConfiguration().get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false) : configuration.get(Configuration.PROPERTY_AZURE_TELEMETRY_DISABLED, false); } }
If this is null, do we want to add a null value to the converted list?
/*
 * Converts a DataLakeSignedIdentifier into its blob-storage equivalent.
 * A null input maps to a null output.
 */
private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) {
    if (identifier == null) {
        return null;
    }
    BlobSignedIdentifier blobIdentifier = new BlobSignedIdentifier();
    blobIdentifier.setId(identifier.getId());
    blobIdentifier.setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy()));
    return blobIdentifier;
}
if (identifier == null) {
/*
 * Converts a DataLakeSignedIdentifier to a BlobSignedIdentifier, copying the id and
 * translating the access policy. Returns null when the input is null so callers can
 * preserve "no identifier" rather than inserting a placeholder.
 */
private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) {
    if (identifier == null) {
        return null;
    }
    return new BlobSignedIdentifier()
        .setId(identifier.getId())
        .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy()));
}
class Transforms { static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties 
toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } return new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { return new BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } 
return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties) { if (properties == null) { return null; } else { return new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata()); } } static 
FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } return new PathItem(path.getETag(), OffsetDateTime.parse(path.getLastModified(), DateTimeFormatter.RFC_1123_DATE_TIME), path.getContentLength(), path.getGroup(), path.isDirectory() == null ? 
false : path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions()); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toPathReadHeaders(r.getDeserializedHeaders())); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) 
.setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setIsServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMD5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if 
(identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } }
/**
 * Internal mapping layer between Data Lake model types and the equivalent Blob model types.
 * The Data Lake clients delegate to the Blob clients, so each converter copies fields
 * one-to-one into the corresponding type. Every converter treats a null input as "nothing
 * to convert" and returns null rather than throwing.
 */
class Transforms {

    /** Maps a Data Lake public access type onto the equivalent blob public access type. */
    static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(
        PublicAccessType fileSystemPublicAccessType) {
        if (fileSystemPublicAccessType == null) {
            return null;
        }
        return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString());
    }

    private static LeaseDurationType toDataLakeLeaseDurationType(
        com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) {
        if (blobLeaseDurationType == null) {
            return null;
        }
        return LeaseDurationType.fromString(blobLeaseDurationType.toString());
    }

    private static LeaseStateType toDataLakeLeaseStateType(
        com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) {
        if (blobLeaseStateType == null) {
            return null;
        }
        return LeaseStateType.fromString(blobLeaseStateType.toString());
    }

    private static LeaseStatusType toDataLakeLeaseStatusType(
        com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) {
        if (blobLeaseStatusType == null) {
            return null;
        }
        return LeaseStatusType.fromString(blobLeaseStatusType.toString());
    }

    private static PublicAccessType toDataLakePublicAccessType(
        com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) {
        if (blobPublicAccessType == null) {
            return null;
        }
        return PublicAccessType.fromString(blobPublicAccessType.toString());
    }

    private static CopyStatusType toDataLakeCopyStatusType(
        com.azure.storage.blob.models.CopyStatusType blobCopyStatus) {
        if (blobCopyStatus == null) {
            return null;
        }
        return CopyStatusType.fromString(blobCopyStatus.toString());
    }

    private static ArchiveStatus toDataLakeArchiveStatus(
        com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) {
        if (blobArchiveStatus == null) {
            return null;
        }
        return ArchiveStatus.fromString(blobArchiveStatus.toString());
    }

    private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) {
        if (blobAccessTier == null) {
            return null;
        }
        return AccessTier.fromString(blobAccessTier.toString());
    }

    /** Converts blob container properties into Data Lake file system properties. */
    static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) {
        if (blobContainerProperties == null) {
            return null;
        }
        return new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(),
            blobContainerProperties.getLastModified(),
            Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()),
            Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()),
            Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()),
            Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()),
            blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold());
    }

    // Null guard added for consistency with every other converter in this class; previously a
    // null input threw NullPointerException here.
    private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) {
        if (fileSystemListDetails == null) {
            return null;
        }
        return new BlobContainerListDetails()
            .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata());
    }

    /** Converts list-file-systems options into blob list-containers options. */
    static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) {
        // Null guard added for consistency with the other converters (was an NPE on null input).
        if (listFileSystemsOptions == null) {
            return null;
        }
        return new ListBlobContainersOptions()
            .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails()))
            .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage())
            .setPrefix(listFileSystemsOptions.getPrefix());
    }

    static UserDelegationKey toDataLakeUserDelegationKey(
        com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) {
        if (blobUserDelegationKey == null) {
            return null;
        }
        return new UserDelegationKey()
            .setSignedExpiry(blobUserDelegationKey.getSignedExpiry())
            .setSignedObjectId(blobUserDelegationKey.getSignedObjectId())
            .setSignedTenantId(blobUserDelegationKey.getSignedTenantId())
            .setSignedService(blobUserDelegationKey.getSignedService())
            .setSignedStart(blobUserDelegationKey.getSignedStart())
            .setSignedVersion(blobUserDelegationKey.getSignedVersion())
            .setValue(blobUserDelegationKey.getValue());
    }

    static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) {
        if (pathHTTPHeaders == null) {
            return null;
        }
        return new BlobHttpHeaders()
            .setCacheControl(pathHTTPHeaders.getCacheControl())
            .setContentDisposition(pathHTTPHeaders.getContentDisposition())
            .setContentEncoding(pathHTTPHeaders.getContentEncoding())
            .setContentLanguage(pathHTTPHeaders.getContentLanguage())
            .setContentType(pathHTTPHeaders.getContentType())
            .setContentMd5(pathHTTPHeaders.getContentMd5());
    }

    static BlobRange toBlobRange(FileRange fileRange) {
        if (fileRange == null) {
            return null;
        }
        return new BlobRange(fileRange.getOffset(), fileRange.getCount());
    }

    static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions(
        DownloadRetryOptions dataLakeOptions) {
        if (dataLakeOptions == null) {
            return null;
        }
        return new com.azure.storage.blob.models.DownloadRetryOptions()
            .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests());
    }

    /** Converts blob properties into Data Lake path properties (field-for-field copy). */
    static PathProperties toPathProperties(BlobProperties properties) {
        if (properties == null) {
            return null;
        }
        return new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(),
            properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(),
            properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(),
            properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()),
            Transforms.toDataLakeLeaseStateType(properties.getLeaseState()),
            Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(),
            Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(),
            properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(),
            properties.isServerEncrypted(), properties.isIncrementalCopy(),
            Transforms.toDataLakeAccessTier(properties.getAccessTier()),
            Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(),
            properties.getAccessTierChangeTime(), properties.getMetadata());
    }

    static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) {
        if (blobContainerItem == null) {
            return null;
        }
        return new FileSystemItem()
            .setName(blobContainerItem.getName())
            .setMetadata(blobContainerItem.getMetadata())
            .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties()));
    }

    private static FileSystemItemProperties toFileSystemItemProperties(
        BlobContainerItemProperties blobContainerItemProperties) {
        if (blobContainerItemProperties == null) {
            return null;
        }
        return new FileSystemItemProperties()
            .setETag(blobContainerItemProperties.getETag())
            .setLastModified(blobContainerItemProperties.getLastModified())
            .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus()))
            .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState()))
            .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration()))
            .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess()))
            .setHasLegalHold(blobContainerItemProperties.isHasLegalHold())
            .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy());
    }

    /**
     * Converts a generated {@code Path} into a {@code PathItem}. The last-modified value is parsed
     * as an RFC 1123 date string; a null {@code isDirectory} is treated as {@code false}.
     */
    static PathItem toPathItem(Path path) {
        if (path == null) {
            return null;
        }
        return new PathItem(path.getETag(),
            OffsetDateTime.parse(path.getLastModified(), DateTimeFormatter.RFC_1123_DATE_TIME),
            path.getContentLength(), path.getGroup(),
            path.isDirectory() != null && path.isDirectory(),
            path.getName(), path.getOwner(), path.getPermissions());
    }

    static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) {
        if (requestConditions == null) {
            return null;
        }
        return new BlobRequestConditions()
            .setLeaseId(requestConditions.getLeaseId())
            .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince())
            .setIfNoneMatch(requestConditions.getIfNoneMatch())
            .setIfMatch(requestConditions.getIfMatch())
            .setIfModifiedSince(requestConditions.getIfModifiedSince());
    }

    static FileReadResponse toFileReadResponse(BlobDownloadResponse r) {
        if (r == null) {
            return null;
        }
        // Re-wrap the sync response as an async response (with no body flux) so both the sync and
        // async paths share a single conversion.
        return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(
            r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders())));
    }

    static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) {
        if (r == null) {
            return null;
        }
        return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
            Transforms.toPathReadHeaders(r.getDeserializedHeaders()));
    }

    private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h) {
        if (h == null) {
            return null;
        }
        return new FileReadHeaders()
            .setLastModified(h.getLastModified())
            .setMetadata(h.getMetadata())
            .setContentLength(h.getContentLength())
            .setContentType(h.getContentType())
            .setContentRange(h.getContentRange())
            .setETag(h.getETag())
            .setContentMd5(h.getContentMd5())
            .setContentEncoding(h.getContentEncoding())
            .setCacheControl(h.getCacheControl())
            .setContentDisposition(h.getContentDisposition())
            .setContentLanguage(h.getContentLanguage())
            .setCopyCompletionTime(h.getCopyCompletionTime())
            .setCopyStatusDescription(h.getCopyStatusDescription())
            .setCopyId(h.getCopyId())
            .setCopyProgress(h.getCopyProgress())
            .setCopySource(h.getCopySource())
            .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
            .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
            .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
            .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
            .setClientRequestId(h.getClientRequestId())
            .setRequestId(h.getRequestId())
            .setVersion(h.getVersion())
            .setAcceptRanges(h.getAcceptRanges())
            .setDateProperty(h.getDateProperty())
            .setIsServerEncrypted(h.isServerEncrypted())
            .setEncryptionKeySha256(h.getEncryptionKeySha256())
            .setFileContentMD5(h.getBlobContentMD5())
            .setContentCrc64(h.getContentCrc64())
            .setErrorCode(h.getErrorCode());
    }

    static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) {
        if (identifiers == null) {
            return null;
        }
        List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(identifiers.size());
        for (DataLakeSignedIdentifier identifier : identifiers) {
            blobIdentifiers.add(Transforms.toBlobIdentifier(identifier));
        }
        return blobIdentifiers;
    }

    private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) {
        if (accessPolicy == null) {
            return null;
        }
        return new BlobAccessPolicy()
            .setExpiresOn(accessPolicy.getExpiresOn())
            .setStartsOn(accessPolicy.getStartsOn())
            .setPermissions(accessPolicy.getPermissions());
    }

    static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) {
        if (accessPolicies == null) {
            return null;
        }
        return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()),
            Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers()));
    }

    static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) {
        if (identifiers == null) {
            return null;
        }
        List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(identifiers.size());
        for (BlobSignedIdentifier identifier : identifiers) {
            dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier));
        }
        return dataLakeIdentifiers;
    }

    private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) {
        if (identifier == null) {
            return null;
        }
        return new DataLakeSignedIdentifier()
            .setId(identifier.getId())
            .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy()));
    }

    private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) {
        if (accessPolicy == null) {
            return null;
        }
        return new DataLakeAccessPolicy()
            .setExpiresOn(accessPolicy.getExpiresOn())
            .setStartsOn(accessPolicy.getStartsOn())
            .setPermissions(accessPolicy.getPermissions());
    }
}
Since the Data Lake API just calls into the blob API, we're basically passing through exactly what was given to us into the corresponding type, so I think it's fine to have that null check.
/**
 * Converts a Data Lake signed identifier into the equivalent blob signed identifier,
 * returning null when the input is null.
 */
private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) {
    if (identifier == null) {
        return null;
    }
    BlobSignedIdentifier blobIdentifier = new BlobSignedIdentifier();
    blobIdentifier.setId(identifier.getId());
    blobIdentifier.setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy()));
    return blobIdentifier;
}
if (identifier == null) {
/**
 * Maps a Data Lake signed identifier onto a blob signed identifier; a null input yields null.
 */
private static BlobSignedIdentifier toBlobIdentifier(DataLakeSignedIdentifier identifier) {
    return identifier == null
        ? null
        : new BlobSignedIdentifier()
            .setId(identifier.getId())
            .setAccessPolicy(Transforms.toBlobAccessPolicy(identifier.getAccessPolicy()));
}
/**
 * Internal mapping layer between Data Lake model types and the equivalent Blob model types.
 * The Data Lake clients delegate to the Blob clients, so each converter copies fields
 * one-to-one into the corresponding type. Every converter treats a null input as "nothing
 * to convert" and returns null rather than throwing.
 */
class Transforms {

    /** Maps a Data Lake public access type onto the equivalent blob public access type. */
    static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(
        PublicAccessType fileSystemPublicAccessType) {
        if (fileSystemPublicAccessType == null) {
            return null;
        }
        return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString());
    }

    private static LeaseDurationType toDataLakeLeaseDurationType(
        com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) {
        if (blobLeaseDurationType == null) {
            return null;
        }
        return LeaseDurationType.fromString(blobLeaseDurationType.toString());
    }

    private static LeaseStateType toDataLakeLeaseStateType(
        com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) {
        if (blobLeaseStateType == null) {
            return null;
        }
        return LeaseStateType.fromString(blobLeaseStateType.toString());
    }

    private static LeaseStatusType toDataLakeLeaseStatusType(
        com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) {
        if (blobLeaseStatusType == null) {
            return null;
        }
        return LeaseStatusType.fromString(blobLeaseStatusType.toString());
    }

    private static PublicAccessType toDataLakePublicAccessType(
        com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) {
        if (blobPublicAccessType == null) {
            return null;
        }
        return PublicAccessType.fromString(blobPublicAccessType.toString());
    }

    private static CopyStatusType toDataLakeCopyStatusType(
        com.azure.storage.blob.models.CopyStatusType blobCopyStatus) {
        if (blobCopyStatus == null) {
            return null;
        }
        return CopyStatusType.fromString(blobCopyStatus.toString());
    }

    private static ArchiveStatus toDataLakeArchiveStatus(
        com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) {
        if (blobArchiveStatus == null) {
            return null;
        }
        return ArchiveStatus.fromString(blobArchiveStatus.toString());
    }

    private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) {
        if (blobAccessTier == null) {
            return null;
        }
        return AccessTier.fromString(blobAccessTier.toString());
    }

    /** Converts blob container properties into Data Lake file system properties. */
    static FileSystemProperties toFileSystemProperties(BlobContainerProperties blobContainerProperties) {
        if (blobContainerProperties == null) {
            return null;
        }
        return new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(),
            blobContainerProperties.getLastModified(),
            Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()),
            Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()),
            Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()),
            Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()),
            blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold());
    }

    // Null guard added for consistency with every other converter in this class; previously a
    // null input threw NullPointerException here.
    private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) {
        if (fileSystemListDetails == null) {
            return null;
        }
        return new BlobContainerListDetails()
            .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata());
    }

    /** Converts list-file-systems options into blob list-containers options. */
    static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) {
        // Null guard added for consistency with the other converters (was an NPE on null input).
        if (listFileSystemsOptions == null) {
            return null;
        }
        return new ListBlobContainersOptions()
            .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails()))
            .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage())
            .setPrefix(listFileSystemsOptions.getPrefix());
    }

    static UserDelegationKey toDataLakeUserDelegationKey(
        com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) {
        if (blobUserDelegationKey == null) {
            return null;
        }
        return new UserDelegationKey()
            .setSignedExpiry(blobUserDelegationKey.getSignedExpiry())
            .setSignedObjectId(blobUserDelegationKey.getSignedObjectId())
            .setSignedTenantId(blobUserDelegationKey.getSignedTenantId())
            .setSignedService(blobUserDelegationKey.getSignedService())
            .setSignedStart(blobUserDelegationKey.getSignedStart())
            .setSignedVersion(blobUserDelegationKey.getSignedVersion())
            .setValue(blobUserDelegationKey.getValue());
    }

    static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) {
        if (pathHTTPHeaders == null) {
            return null;
        }
        return new BlobHttpHeaders()
            .setCacheControl(pathHTTPHeaders.getCacheControl())
            .setContentDisposition(pathHTTPHeaders.getContentDisposition())
            .setContentEncoding(pathHTTPHeaders.getContentEncoding())
            .setContentLanguage(pathHTTPHeaders.getContentLanguage())
            .setContentType(pathHTTPHeaders.getContentType())
            .setContentMd5(pathHTTPHeaders.getContentMd5());
    }

    static BlobRange toBlobRange(FileRange fileRange) {
        if (fileRange == null) {
            return null;
        }
        return new BlobRange(fileRange.getOffset(), fileRange.getCount());
    }

    static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions(
        DownloadRetryOptions dataLakeOptions) {
        if (dataLakeOptions == null) {
            return null;
        }
        return new com.azure.storage.blob.models.DownloadRetryOptions()
            .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests());
    }

    /** Converts blob properties into Data Lake path properties (field-for-field copy). */
    static PathProperties toPathProperties(BlobProperties properties) {
        if (properties == null) {
            return null;
        }
        return new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(),
            properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(),
            properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(),
            properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()),
            Transforms.toDataLakeLeaseStateType(properties.getLeaseState()),
            Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(),
            Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(),
            properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(),
            properties.isServerEncrypted(), properties.isIncrementalCopy(),
            Transforms.toDataLakeAccessTier(properties.getAccessTier()),
            Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(),
            properties.getAccessTierChangeTime(), properties.getMetadata());
    }

    static FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) {
        if (blobContainerItem == null) {
            return null;
        }
        return new FileSystemItem()
            .setName(blobContainerItem.getName())
            .setMetadata(blobContainerItem.getMetadata())
            .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties()));
    }

    private static FileSystemItemProperties toFileSystemItemProperties(
        BlobContainerItemProperties blobContainerItemProperties) {
        if (blobContainerItemProperties == null) {
            return null;
        }
        return new FileSystemItemProperties()
            .setETag(blobContainerItemProperties.getETag())
            .setLastModified(blobContainerItemProperties.getLastModified())
            .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus()))
            .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState()))
            .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration()))
            .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess()))
            .setHasLegalHold(blobContainerItemProperties.isHasLegalHold())
            .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy());
    }

    /**
     * Converts a generated {@code Path} into a {@code PathItem}. The last-modified value is parsed
     * as an RFC 1123 date string; a null {@code isDirectory} is treated as {@code false}.
     */
    static PathItem toPathItem(Path path) {
        if (path == null) {
            return null;
        }
        return new PathItem(path.getETag(),
            OffsetDateTime.parse(path.getLastModified(), DateTimeFormatter.RFC_1123_DATE_TIME),
            path.getContentLength(), path.getGroup(),
            path.isDirectory() != null && path.isDirectory(),
            path.getName(), path.getOwner(), path.getPermissions());
    }

    static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) {
        if (requestConditions == null) {
            return null;
        }
        return new BlobRequestConditions()
            .setLeaseId(requestConditions.getLeaseId())
            .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince())
            .setIfNoneMatch(requestConditions.getIfNoneMatch())
            .setIfMatch(requestConditions.getIfMatch())
            .setIfModifiedSince(requestConditions.getIfModifiedSince());
    }

    static FileReadResponse toFileReadResponse(BlobDownloadResponse r) {
        if (r == null) {
            return null;
        }
        // Re-wrap the sync response as an async response (with no body flux) so both the sync and
        // async paths share a single conversion.
        return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(
            r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders())));
    }

    static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) {
        if (r == null) {
            return null;
        }
        return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(),
            Transforms.toPathReadHeaders(r.getDeserializedHeaders()));
    }

    private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h) {
        if (h == null) {
            return null;
        }
        return new FileReadHeaders()
            .setLastModified(h.getLastModified())
            .setMetadata(h.getMetadata())
            .setContentLength(h.getContentLength())
            .setContentType(h.getContentType())
            .setContentRange(h.getContentRange())
            .setETag(h.getETag())
            .setContentMd5(h.getContentMd5())
            .setContentEncoding(h.getContentEncoding())
            .setCacheControl(h.getCacheControl())
            .setContentDisposition(h.getContentDisposition())
            .setContentLanguage(h.getContentLanguage())
            .setCopyCompletionTime(h.getCopyCompletionTime())
            .setCopyStatusDescription(h.getCopyStatusDescription())
            .setCopyId(h.getCopyId())
            .setCopyProgress(h.getCopyProgress())
            .setCopySource(h.getCopySource())
            .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus()))
            .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration()))
            .setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState()))
            .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus()))
            .setClientRequestId(h.getClientRequestId())
            .setRequestId(h.getRequestId())
            .setVersion(h.getVersion())
            .setAcceptRanges(h.getAcceptRanges())
            .setDateProperty(h.getDateProperty())
            .setIsServerEncrypted(h.isServerEncrypted())
            .setEncryptionKeySha256(h.getEncryptionKeySha256())
            .setFileContentMD5(h.getBlobContentMD5())
            .setContentCrc64(h.getContentCrc64())
            .setErrorCode(h.getErrorCode());
    }

    static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) {
        if (identifiers == null) {
            return null;
        }
        List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(identifiers.size());
        for (DataLakeSignedIdentifier identifier : identifiers) {
            blobIdentifiers.add(Transforms.toBlobIdentifier(identifier));
        }
        return blobIdentifiers;
    }

    private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) {
        if (accessPolicy == null) {
            return null;
        }
        return new BlobAccessPolicy()
            .setExpiresOn(accessPolicy.getExpiresOn())
            .setStartsOn(accessPolicy.getStartsOn())
            .setPermissions(accessPolicy.getPermissions());
    }

    static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) {
        if (accessPolicies == null) {
            return null;
        }
        return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()),
            Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers()));
    }

    static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) {
        if (identifiers == null) {
            return null;
        }
        List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(identifiers.size());
        for (BlobSignedIdentifier identifier : identifiers) {
            dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier));
        }
        return dataLakeIdentifiers;
    }

    private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) {
        if (identifier == null) {
            return null;
        }
        return new DataLakeSignedIdentifier()
            .setId(identifier.getId())
            .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy()));
    }

    private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) {
        if (accessPolicy == null) {
            return null;
        }
        return new DataLakeAccessPolicy()
            .setExpiresOn(accessPolicy.getExpiresOn())
            .setStartsOn(accessPolicy.getStartsOn())
            .setPermissions(accessPolicy.getPermissions());
    }
}
class Transforms { static com.azure.storage.blob.models.PublicAccessType toBlobPublicAccessType(PublicAccessType fileSystemPublicAccessType) { if (fileSystemPublicAccessType == null) { return null; } return com.azure.storage.blob.models.PublicAccessType.fromString(fileSystemPublicAccessType.toString()); } private static LeaseDurationType toDataLakeLeaseDurationType(com.azure.storage.blob.models.LeaseDurationType blobLeaseDurationType) { if (blobLeaseDurationType == null) { return null; } return LeaseDurationType.fromString(blobLeaseDurationType.toString()); } private static LeaseStateType toDataLakeLeaseStateType(com.azure.storage.blob.models.LeaseStateType blobLeaseStateType) { if (blobLeaseStateType == null) { return null; } return LeaseStateType.fromString(blobLeaseStateType.toString()); } private static LeaseStatusType toDataLakeLeaseStatusType(com.azure.storage.blob.models.LeaseStatusType blobLeaseStatusType) { if (blobLeaseStatusType == null) { return null; } return LeaseStatusType.fromString(blobLeaseStatusType.toString()); } private static PublicAccessType toDataLakePublicAccessType(com.azure.storage.blob.models.PublicAccessType blobPublicAccessType) { if (blobPublicAccessType == null) { return null; } return PublicAccessType.fromString(blobPublicAccessType.toString()); } private static CopyStatusType toDataLakeCopyStatusType( com.azure.storage.blob.models.CopyStatusType blobCopyStatus) { if (blobCopyStatus == null) { return null; } return CopyStatusType.fromString(blobCopyStatus.toString()); } private static ArchiveStatus toDataLakeArchiveStatus( com.azure.storage.blob.models.ArchiveStatus blobArchiveStatus) { if (blobArchiveStatus == null) { return null; } return ArchiveStatus.fromString(blobArchiveStatus.toString()); } private static AccessTier toDataLakeAccessTier(com.azure.storage.blob.models.AccessTier blobAccessTier) { if (blobAccessTier == null) { return null; } return AccessTier.fromString(blobAccessTier.toString()); } static FileSystemProperties 
toFileSystemProperties(BlobContainerProperties blobContainerProperties) { if (blobContainerProperties == null) { return null; } return new FileSystemProperties(blobContainerProperties.getMetadata(), blobContainerProperties.getETag(), blobContainerProperties.getLastModified(), Transforms.toDataLakeLeaseDurationType(blobContainerProperties.getLeaseDuration()), Transforms.toDataLakeLeaseStateType(blobContainerProperties.getLeaseState()), Transforms.toDataLakeLeaseStatusType(blobContainerProperties.getLeaseStatus()), Transforms.toDataLakePublicAccessType(blobContainerProperties.getBlobPublicAccess()), blobContainerProperties.hasImmutabilityPolicy(), blobContainerProperties.hasLegalHold()); } private static BlobContainerListDetails toBlobContainerListDetails(FileSystemListDetails fileSystemListDetails) { return new BlobContainerListDetails() .setRetrieveMetadata(fileSystemListDetails.getRetrieveMetadata()); } static ListBlobContainersOptions toListBlobContainersOptions(ListFileSystemsOptions listFileSystemsOptions) { return new ListBlobContainersOptions() .setDetails(toBlobContainerListDetails(listFileSystemsOptions.getDetails())) .setMaxResultsPerPage(listFileSystemsOptions.getMaxResultsPerPage()) .setPrefix(listFileSystemsOptions.getPrefix()); } static UserDelegationKey toDataLakeUserDelegationKey(com.azure.storage.blob.models.UserDelegationKey blobUserDelegationKey) { if (blobUserDelegationKey == null) { return null; } return new UserDelegationKey() .setSignedExpiry(blobUserDelegationKey.getSignedExpiry()) .setSignedObjectId(blobUserDelegationKey.getSignedObjectId()) .setSignedTenantId(blobUserDelegationKey.getSignedTenantId()) .setSignedService(blobUserDelegationKey.getSignedService()) .setSignedStart(blobUserDelegationKey.getSignedStart()) .setSignedVersion(blobUserDelegationKey.getSignedVersion()) .setValue(blobUserDelegationKey.getValue()); } static BlobHttpHeaders toBlobHttpHeaders(PathHttpHeaders pathHTTPHeaders) { if (pathHTTPHeaders == null) { return null; } 
return new BlobHttpHeaders() .setCacheControl(pathHTTPHeaders.getCacheControl()) .setContentDisposition(pathHTTPHeaders.getContentDisposition()) .setContentEncoding(pathHTTPHeaders.getContentEncoding()) .setContentLanguage(pathHTTPHeaders.getContentLanguage()) .setContentType(pathHTTPHeaders.getContentType()) .setContentMd5(pathHTTPHeaders.getContentMd5()); } static BlobRange toBlobRange(FileRange fileRange) { if (fileRange == null) { return null; } return new BlobRange(fileRange.getOffset(), fileRange.getCount()); } static com.azure.storage.blob.models.DownloadRetryOptions toBlobDownloadRetryOptions( DownloadRetryOptions dataLakeOptions) { if (dataLakeOptions == null) { return null; } return new com.azure.storage.blob.models.DownloadRetryOptions() .setMaxRetryRequests(dataLakeOptions.getMaxRetryRequests()); } static PathProperties toPathProperties(BlobProperties properties) { if (properties == null) { return null; } else { return new PathProperties(properties.getCreationTime(), properties.getLastModified(), properties.getETag(), properties.getBlobSize(), properties.getContentType(), properties.getContentMd5(), properties.getContentEncoding(), properties.getContentDisposition(), properties.getContentLanguage(), properties.getCacheControl(), Transforms.toDataLakeLeaseStatusType(properties.getLeaseStatus()), Transforms.toDataLakeLeaseStateType(properties.getLeaseState()), Transforms.toDataLakeLeaseDurationType(properties.getLeaseDuration()), properties.getCopyId(), Transforms.toDataLakeCopyStatusType(properties.getCopyStatus()), properties.getCopySource(), properties.getCopyProgress(), properties.getCopyCompletionTime(), properties.getCopyStatusDescription(), properties.isServerEncrypted(), properties.isIncrementalCopy(), Transforms.toDataLakeAccessTier(properties.getAccessTier()), Transforms.toDataLakeArchiveStatus(properties.getArchiveStatus()), properties.getEncryptionKeySha256(), properties.getAccessTierChangeTime(), properties.getMetadata()); } } static 
FileSystemItem toFileSystemItem(BlobContainerItem blobContainerItem) { if (blobContainerItem == null) { return null; } return new FileSystemItem() .setName(blobContainerItem.getName()) .setMetadata(blobContainerItem.getMetadata()) .setProperties(Transforms.toFileSystemItemProperties(blobContainerItem.getProperties())); } private static FileSystemItemProperties toFileSystemItemProperties( BlobContainerItemProperties blobContainerItemProperties) { if (blobContainerItemProperties == null) { return null; } return new FileSystemItemProperties() .setETag(blobContainerItemProperties.getETag()) .setLastModified(blobContainerItemProperties.getLastModified()) .setLeaseStatus(toDataLakeLeaseStatusType(blobContainerItemProperties.getLeaseStatus())) .setLeaseState(toDataLakeLeaseStateType(blobContainerItemProperties.getLeaseState())) .setLeaseDuration(toDataLakeLeaseDurationType(blobContainerItemProperties.getLeaseDuration())) .setPublicAccess(toDataLakePublicAccessType(blobContainerItemProperties.getPublicAccess())) .setHasLegalHold(blobContainerItemProperties.isHasLegalHold()) .setHasImmutabilityPolicy(blobContainerItemProperties.isHasImmutabilityPolicy()); } static PathItem toPathItem(Path path) { if (path == null) { return null; } return new PathItem(path.getETag(), OffsetDateTime.parse(path.getLastModified(), DateTimeFormatter.RFC_1123_DATE_TIME), path.getContentLength(), path.getGroup(), path.isDirectory() == null ? 
false : path.isDirectory(), path.getName(), path.getOwner(), path.getPermissions()); } static BlobRequestConditions toBlobRequestConditions(DataLakeRequestConditions requestConditions) { if (requestConditions == null) { return null; } return new BlobRequestConditions() .setLeaseId(requestConditions.getLeaseId()) .setIfUnmodifiedSince(requestConditions.getIfUnmodifiedSince()) .setIfNoneMatch(requestConditions.getIfNoneMatch()) .setIfMatch(requestConditions.getIfMatch()) .setIfModifiedSince(requestConditions.getIfModifiedSince()); } static FileReadResponse toFileReadResponse(BlobDownloadResponse r) { if (r == null) { return null; } return new FileReadResponse(Transforms.toFileReadAsyncResponse(new BlobDownloadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), null, r.getDeserializedHeaders()))); } static FileReadAsyncResponse toFileReadAsyncResponse(BlobDownloadAsyncResponse r) { if (r == null) { return null; } return new FileReadAsyncResponse(r.getRequest(), r.getStatusCode(), r.getHeaders(), r.getValue(), Transforms.toPathReadHeaders(r.getDeserializedHeaders())); } private static FileReadHeaders toPathReadHeaders(BlobDownloadHeaders h) { if (h == null) { return null; } return new FileReadHeaders() .setLastModified(h.getLastModified()) .setMetadata(h.getMetadata()) .setContentLength(h.getContentLength()) .setContentType(h.getContentType()) .setContentRange(h.getContentRange()) .setETag(h.getETag()) .setContentMd5(h.getContentMd5()) .setContentEncoding(h.getContentEncoding()) .setCacheControl(h.getCacheControl()) .setContentDisposition(h.getContentDisposition()) .setContentLanguage(h.getContentLanguage()) .setCopyCompletionTime(h.getCopyCompletionTime()) .setCopyStatusDescription(h.getCopyStatusDescription()) .setCopyId(h.getCopyId()) .setCopyProgress(h.getCopyProgress()) .setCopySource(h.getCopySource()) .setCopyStatus(Transforms.toDataLakeCopyStatusType(h.getCopyStatus())) .setLeaseDuration(Transforms.toDataLakeLeaseDurationType(h.getLeaseDuration())) 
.setLeaseState(Transforms.toDataLakeLeaseStateType(h.getLeaseState())) .setLeaseStatus(Transforms.toDataLakeLeaseStatusType(h.getLeaseStatus())) .setClientRequestId(h.getClientRequestId()) .setRequestId(h.getRequestId()) .setVersion(h.getVersion()) .setAcceptRanges(h.getAcceptRanges()) .setDateProperty(h.getDateProperty()) .setIsServerEncrypted(h.isServerEncrypted()) .setEncryptionKeySha256(h.getEncryptionKeySha256()) .setFileContentMD5(h.getBlobContentMD5()) .setContentCrc64(h.getContentCrc64()) .setErrorCode(h.getErrorCode()); } static List<BlobSignedIdentifier> toBlobIdentifierList(List<DataLakeSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<BlobSignedIdentifier> blobIdentifiers = new ArrayList<>(); for (DataLakeSignedIdentifier identifier : identifiers) { blobIdentifiers.add(Transforms.toBlobIdentifier(identifier)); } return blobIdentifiers; } private static BlobAccessPolicy toBlobAccessPolicy(DataLakeAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new BlobAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } static FileSystemAccessPolicies toFileSystemAccessPolicies(BlobContainerAccessPolicies accessPolicies) { if (accessPolicies == null) { return null; } return new FileSystemAccessPolicies(Transforms.toDataLakePublicAccessType(accessPolicies.getBlobAccessType()), Transforms.toDataLakeIdentifierList(accessPolicies.getIdentifiers())); } static List<DataLakeSignedIdentifier> toDataLakeIdentifierList(List<BlobSignedIdentifier> identifiers) { if (identifiers == null) { return null; } List<DataLakeSignedIdentifier> dataLakeIdentifiers = new ArrayList<>(); for (BlobSignedIdentifier identifier : identifiers) { dataLakeIdentifiers.add(Transforms.toDataLakeIdentifier(identifier)); } return dataLakeIdentifiers; } private static DataLakeSignedIdentifier toDataLakeIdentifier(BlobSignedIdentifier identifier) { if 
(identifier == null) { return null; } return new DataLakeSignedIdentifier() .setId(identifier.getId()) .setAccessPolicy(Transforms.toDataLakeAccessPolicy(identifier.getAccessPolicy())); } private static DataLakeAccessPolicy toDataLakeAccessPolicy(BlobAccessPolicy accessPolicy) { if (accessPolicy == null) { return null; } return new DataLakeAccessPolicy() .setExpiresOn(accessPolicy.getExpiresOn()) .setStartsOn(accessPolicy.getStartsOn()) .setPermissions(accessPolicy.getPermissions()); } }
minor spelling: thown -> thrown
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); this.logger.debug("Found {} leases, taking {}", allLeases.size(), leasesToTake.size()); if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } ZonedDateTime stopTimer = ZonedDateTime.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100)) .repeat( () -> { ZonedDateTime currentTime = ZonedDateTime.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).last(); }) ); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thown while trying to acquire available leases", throwable); return Mono.empty(); }) .repeat(() -> { return !cancellationToken.isCancellationRequested(); }) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
logger.warn("Unexpected exception thown while trying to acquire available leases", throwable);
private Mono<Void> run(CancellationToken cancellationToken) { return Flux.just(this) .flatMap(value -> this.leaseContainer.getAllLeases()) .collectList() .flatMap(allLeases -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); List<Lease> leasesToTake = this.partitionLoadBalancingStrategy.selectLeasesToTake(allLeases); this.logger.debug("Found {} leases, taking {}", allLeases.size(), leasesToTake.size()); if (cancellationToken.isCancellationRequested()) return Mono.empty(); return Flux.fromIterable(leasesToTake) .flatMap(lease -> { if (cancellationToken.isCancellationRequested()) return Mono.empty(); return this.partitionController.addOrUpdateLease(lease); }) .then(Mono.just(this) .flatMap(value -> { if (cancellationToken.isCancellationRequested()) { return Mono.empty(); } ZonedDateTime stopTimer = ZonedDateTime.now().plus(this.leaseAcquireInterval); return Mono.just(value) .delayElement(Duration.ofMillis(100)) .repeat( () -> { ZonedDateTime currentTime = ZonedDateTime.now(); return !cancellationToken.isCancellationRequested() && currentTime.isBefore(stopTimer); }).last(); }) ); }) .onErrorResume(throwable -> { logger.warn("Unexpected exception thrown while trying to acquire available leases", throwable); return Mono.empty(); }) .repeat(() -> { return !cancellationToken.isCancellationRequested(); }) .then() .onErrorResume(throwable -> { logger.info("Partition load balancer task stopped."); return this.stop(); }); }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
class PartitionLoadBalancerImpl implements PartitionLoadBalancer { private final Logger logger = LoggerFactory.getLogger(PartitionLoadBalancerImpl.class); private final PartitionController partitionController; private final LeaseContainer leaseContainer; private final PartitionLoadBalancingStrategy partitionLoadBalancingStrategy; private final Duration leaseAcquireInterval; private final Scheduler scheduler; private CancellationTokenSource cancellationTokenSource; private volatile boolean started; private final Object lock; public PartitionLoadBalancerImpl( PartitionController partitionController, LeaseContainer leaseContainer, PartitionLoadBalancingStrategy partitionLoadBalancingStrategy, Duration leaseAcquireInterval, Scheduler scheduler) { if (partitionController == null) { throw new IllegalArgumentException("partitionController"); } if (leaseContainer == null) { throw new IllegalArgumentException("leaseContainer"); } if (partitionLoadBalancingStrategy == null) { throw new IllegalArgumentException("partitionLoadBalancingStrategy"); } if (scheduler == null) { throw new IllegalArgumentException("executorService"); } this.partitionController = partitionController; this.leaseContainer = leaseContainer; this.partitionLoadBalancingStrategy = partitionLoadBalancingStrategy; this.leaseAcquireInterval = leaseAcquireInterval; this.scheduler = scheduler; this.started = false; this.lock = new Object(); } @Override public Mono<Void> start() { synchronized (lock) { if (this.started) { throw new IllegalStateException("Partition load balancer already started"); } this.cancellationTokenSource = new CancellationTokenSource(); this.started = true; } return Mono.fromRunnable( () -> { scheduler.schedule(() -> this.run(this.cancellationTokenSource.getToken()).subscribe()); }); } @Override public Mono<Void> stop() { synchronized (lock) { this.started = false; this.cancellationTokenSource.cancel(); } return this.partitionController.shutdown(); } @Override public boolean isRunning() { 
return this.started; } }
Should this call into the `byte[]` constructor instead? ```java this(Objects.requireNonNull(body, "'body' cannot be null.").array()); ```
public EventData(ByteBuffer body) { this.body = Objects.requireNonNull(body, "'body' cannot be null.").array(); this.context = Context.NONE; this.properties = new HashMap<>(); this.systemProperties = new SystemProperties(); }
this.body = Objects.requireNonNull(body, "'body' cannot be null.").array();
public EventData(ByteBuffer body) { this(Objects.requireNonNull(body, "'body' cannot be null.").array()); }
class EventData { /* * These are properties owned by the service and set when a message is received. */ static final Set<String> RESERVED_SYSTEM_PROPERTIES; private final Map<String, Object> properties; private final byte[] body; private final SystemProperties systemProperties; private Context context; static { final Set<String> properties = new HashSet<>(); properties.add(OFFSET_ANNOTATION_NAME.getValue()); properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue()); properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()); properties.add(PUBLISHER_ANNOTATION_NAME.getValue()); RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties); } /** * Creates an event containing the {@code data}. * * @param body The data to set for this event. */ public EventData(byte[] body) { this.body = Objects.requireNonNull(body, "'body' cannot be null."); this.context = Context.NONE; this.properties = new HashMap<>(); this.systemProperties = new SystemProperties(); } /** * Creates an event containing the {@code body}. * * @param body The data to set for this event. * @throws NullPointerException if {@code body} is {@code null}. */ /** * Creates an event by encoding the {@code body} using UTF-8 charset. * * @param body The string that will be UTF-8 encoded to create an event. */ public EventData(String body) { this(body.getBytes(UTF_8)); } /** * Creates an event with the given {@code body}, system properties and context. * * @param body The data to set for this event. * @param systemProperties System properties set by message broker for this event. * @param context A specified key-value pair of type {@link Context}. * @throws NullPointerException if {@code body}, {@code systemProperties}, or {@code context} is {@code null}. 
*/ EventData(byte[] body, SystemProperties systemProperties, Context context) { this.body = Objects.requireNonNull(body, "'body' cannot be null."); this.context = Objects.requireNonNull(context, "'context' cannot be null."); this.systemProperties = Objects.requireNonNull(systemProperties, "'systemProperties' cannot be null."); this.properties = new HashMap<>(); } /** * A specified key-value pair of type {@link Context} to set additional information on the event. * * @return the {@link Context} object set on the event */ Context getContext() { return context; } /** * Adds a new key value pair to the existing context on Event Data. * * @param key The key for this context object * @param value The value for this context object. * @throws NullPointerException if {@code key} or {@code value} is null. * @return The updated {@link EventData}. */ EventData addContext(String key, Object value) { Objects.requireNonNull(key, "The 'key' parameter cannot be null."); Objects.requireNonNull(value, "The 'value' parameter cannot be null."); this.context = context.addData(key, value); return this; } /** * The set of free-form event properties which may be used for passing metadata associated with the event with the * event body during Event Hubs operations. A common use case for {@code properties()} is to associate serialization * hints for the {@link * </p> * * <p><strong>Adding serialization hint using {@code getProperties()}</strong></p> * <p>In the sample, the type of telemetry is indicated by adding an application property with key "eventType".</p> * * {@codesnippet com.azure.messaging.eventhubs.eventdata.getProperties} * * @return Application properties associated with this {@link EventData}. */ public Map<String, Object> getProperties() { return properties; } /** * Properties that are populated by EventHubService. As these are populated by Service, they are only present on a * <b>received</b> EventData. 
* * @return an encapsulation of all SystemProperties appended by EventHubs service into EventData. {@code null} if * the {@link EventData} is not received and is created by the public constructors. */ public Map<String, Object> getSystemProperties() { return systemProperties; } /** * Gets the actual payload/data wrapped by EventData. * * <p> * If the means for deserializing the raw data is not apparent to consumers, a common technique is to make use of * {@link * wish to deserialize the binary data. * </p> * * @return ByteBuffer representing the data. */ public byte[] getBody() { return body; } /** * Returns event data as UTF-8 decoded string. * * @return UTF-8 decoded string representation of the event data. */ public String getBodyAsString() { return new String(body, UTF_8); } /** * Gets the offset of the event when it was received from the associated Event Hub partition. * * @return The offset within the Event Hub partition of the received event. {@code null} if the EventData was not * received from Event Hub service. */ public Long getOffset() { return systemProperties.getOffset(); } /** * Gets a partition key used for message partitioning. If it exists, this value was used to compute a hash to select * a partition to send the message to. * * @return A partition key for this Event Data. {@code null} if the EventData was not received from Event Hub * service or there was no partition key set when the event was sent to the Event Hub. */ public String getPartitionKey() { return systemProperties.getPartitionKey(); } /** * Gets the instant, in UTC, of when the event was enqueued in the Event Hub partition. * * @return The instant, in UTC, this was enqueued in the Event Hub partition. {@code null} if the EventData was not * received from Event Hub service. */ public Instant getEnqueuedTime() { return systemProperties.getEnqueuedTime(); } /** * Gets the sequence number assigned to the event when it was enqueued in the associated Event Hub partition. 
This * is unique for every message received in the Event Hub partition. * * @return The sequence number for this event. {@code null} if the EventData was not received from Event Hub * service. */ public Long getSequenceNumber() { return systemProperties.getSequenceNumber(); } /** * {@inheritDoc} */ @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } EventData eventData = (EventData) o; return Arrays.equals(body, eventData.body); } /** * {@inheritDoc} */ @Override public int hashCode() { return Arrays.hashCode(body); } /** * A collection of properties populated by Azure Event Hubs service. */ static class SystemProperties extends HashMap<String, Object> { private static final long serialVersionUID = -2827050124966993723L; private final Long offset; private final String partitionKey; private final Instant enqueuedTime; private final Long sequenceNumber; SystemProperties() { super(); offset = null; partitionKey = null; enqueuedTime = null; sequenceNumber = null; } SystemProperties(final Map<String, Object> map) { super(map); this.partitionKey = removeSystemProperty(PARTITION_KEY_ANNOTATION_NAME.getValue()); final String offset = removeSystemProperty(OFFSET_ANNOTATION_NAME.getValue()); if (offset == null) { throw new IllegalStateException(String.format(Locale.US, "offset: %s should always be in map.", OFFSET_ANNOTATION_NAME.getValue())); } this.offset = Long.valueOf(offset); final Date enqueuedTimeValue = removeSystemProperty(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()); if (enqueuedTimeValue == null) { throw new IllegalStateException(String.format(Locale.US, "enqueuedTime: %s should always be in map.", ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue())); } this.enqueuedTime = enqueuedTimeValue.toInstant(); final Long sequenceNumber = removeSystemProperty(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()); if (sequenceNumber == null) { throw new IllegalStateException(String.format(Locale.US, 
"sequenceNumber: %s should always be in map.", SEQUENCE_NUMBER_ANNOTATION_NAME.getValue())); } this.sequenceNumber = sequenceNumber; } /** * Gets the offset within the Event Hubs stream. * * @return The offset within the Event Hubs stream. */ private Long getOffset() { return offset; } /** * Gets a partition key used for message partitioning. If it exists, this value was used to compute a hash to * select a partition to send the message to. * * @return A partition key for this Event Data. */ private String getPartitionKey() { return partitionKey; } /** * Gets the time this event was enqueued in the Event Hub. * * @return The time this was enqueued in the service. */ private Instant getEnqueuedTime() { return enqueuedTime; } /** * Gets the sequence number in the event stream for this event. This is unique for every message received in the * Event Hub. * * @return Sequence number for this event. * @throws IllegalStateException if {@link SystemProperties} does not contain the sequence number in a retrieved * event. */ private Long getSequenceNumber() { return sequenceNumber; } @SuppressWarnings("unchecked") private <T> T removeSystemProperty(final String key) { if (this.containsKey(key)) { return (T) (this.remove(key)); } return null; } } }
class EventData {
    /*
     * Property names owned by the Event Hubs service; they are set on a message when it is
     * received and are kept separate from the user-facing application properties.
     */
    static final Set<String> RESERVED_SYSTEM_PROPERTIES;

    // Free-form application properties that travel with the event body.
    private final Map<String, Object> properties;
    // The raw event payload.
    private final byte[] body;
    // Broker-populated metadata; holds only nulls for locally created events.
    private final SystemProperties systemProperties;
    // Key-value context attached to this event (e.g. for tracing).
    private Context context;

    static {
        final Set<String> properties = new HashSet<>();
        properties.add(OFFSET_ANNOTATION_NAME.getValue());
        properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue());
        properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
        properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
        properties.add(PUBLISHER_ANNOTATION_NAME.getValue());

        RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties);
    }

    /**
     * Creates an event containing the {@code body}.
     *
     * @param body The data to set for this event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(byte[] body) {
        this.body = Objects.requireNonNull(body, "'body' cannot be null.");
        this.context = Context.NONE;
        this.properties = new HashMap<>();
        this.systemProperties = new SystemProperties();
    }

    /**
     * Creates an event by encoding the {@code body} using the UTF-8 charset.
     *
     * @param body The string that will be UTF-8 encoded to create an event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(String body) {
        this(body.getBytes(UTF_8));
    }

    /**
     * Creates an event with the given {@code body}, system properties and context. Package-private:
     * used for events received from the service.
     *
     * @param body The data to set for this event.
     * @param systemProperties System properties set by the message broker for this event.
     * @param context A specified key-value pair of type {@link Context}.
     * @throws NullPointerException if {@code body}, {@code systemProperties}, or {@code context} is {@code null}.
     */
    EventData(byte[] body, SystemProperties systemProperties, Context context) {
        this.body = Objects.requireNonNull(body, "'body' cannot be null.");
        this.context = Objects.requireNonNull(context, "'context' cannot be null.");
        this.systemProperties = Objects.requireNonNull(systemProperties, "'systemProperties' cannot be null.");
        this.properties = new HashMap<>();
    }

    /**
     * Gets the key-value {@link Context} holding additional information for this event.
     *
     * @return The {@link Context} object set on the event.
     */
    Context getContext() {
        return context;
    }

    /**
     * Adds a new key-value pair to the existing context on this event.
     *
     * @param key The key for this context object.
     * @param value The value for this context object.
     * @return The updated {@link EventData}.
     * @throws NullPointerException if {@code key} or {@code value} is {@code null}.
     */
    EventData addContext(String key, Object value) {
        Objects.requireNonNull(key, "The 'key' parameter cannot be null.");
        Objects.requireNonNull(value, "The 'value' parameter cannot be null.");

        this.context = context.addData(key, value);
        return this;
    }

    /**
     * The set of free-form event properties that may be used to pass metadata along with the
     * event body during Event Hubs operations, e.g. serialization hints for consumers.
     *
     * @return Application properties associated with this {@link EventData}.
     */
    public Map<String, Object> getProperties() {
        return properties;
    }

    /**
     * Properties populated by the Event Hubs service; only meaningful on a <b>received</b> event.
     *
     * @return The system properties appended by the Event Hubs service. Never {@code null}; for
     *     locally created events the map is empty.
     */
    public Map<String, Object> getSystemProperties() {
        return systemProperties;
    }

    /**
     * Gets the actual payload wrapped by this event.
     *
     * @return A defensive copy of the event's payload bytes.
     */
    public byte[] getBody() {
        return Arrays.copyOf(body, body.length);
    }

    /**
     * Returns the event body as a UTF-8 decoded string.
     *
     * @return UTF-8 decoded string representation of the event data.
     */
    public String getBodyAsString() {
        return new String(body, UTF_8);
    }

    /**
     * Gets the offset of the event when it was received from the associated partition.
     *
     * @return The offset within the partition; {@code null} if the event was not received from
     *     the Event Hubs service.
     */
    public Long getOffset() {
        return systemProperties.getOffset();
    }

    /**
     * Gets the partition key used for message partitioning, if one was set.
     *
     * @return The partition key; {@code null} if the event was not received from the service or
     *     no partition key was set when the event was sent.
     */
    public String getPartitionKey() {
        return systemProperties.getPartitionKey();
    }

    /**
     * Gets the instant, in UTC, when the event was enqueued in the partition.
     *
     * @return The enqueued time; {@code null} if the event was not received from the service.
     */
    public Instant getEnqueuedTime() {
        return systemProperties.getEnqueuedTime();
    }

    /**
     * Gets the sequence number assigned when the event was enqueued in the partition.
     *
     * @return The sequence number; {@code null} if the event was not received from the service.
     */
    public Long getSequenceNumber() {
        return systemProperties.getSequenceNumber();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals(Object o) {
        // Equality is based solely on the payload bytes; metadata is ignored.
        if (this == o) {
            return true;
        }

        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        EventData eventData = (EventData) o;
        return Arrays.equals(body, eventData.body);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        return Arrays.hashCode(body);
    }

    /**
     * A collection of properties populated by the Azure Event Hubs service.
     */
    static class SystemProperties extends HashMap<String, Object> {
        private static final long serialVersionUID = -2827050124966993723L;
        private final Long offset;
        private final String partitionKey;
        private final Instant enqueuedTime;
        private final Long sequenceNumber;

        // Used for locally created (not yet received) events: all metadata is absent.
        SystemProperties() {
            super();
            offset = null;
            partitionKey = null;
            enqueuedTime = null;
            sequenceNumber = null;
        }

        // Builds system properties from the raw annotation map of a received message; well-known
        // entries are removed from the map and exposed through typed getters instead.
        SystemProperties(final Map<String, Object> map) {
            super(map);
            this.partitionKey = removeSystemProperty(PARTITION_KEY_ANNOTATION_NAME.getValue());

            final String offset = removeSystemProperty(OFFSET_ANNOTATION_NAME.getValue());
            if (offset == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "offset: %s should always be in map.", OFFSET_ANNOTATION_NAME.getValue()));
            }
            this.offset = Long.valueOf(offset);

            final Date enqueuedTimeValue = removeSystemProperty(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
            if (enqueuedTimeValue == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "enqueuedTime: %s should always be in map.", ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()));
            }
            this.enqueuedTime = enqueuedTimeValue.toInstant();

            final Long sequenceNumber = removeSystemProperty(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
            if (sequenceNumber == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "sequenceNumber: %s should always be in map.", SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()));
            }
            this.sequenceNumber = sequenceNumber;
        }

        /**
         * Gets the offset within the Event Hubs stream.
         *
         * @return The offset within the Event Hubs stream.
         */
        private Long getOffset() {
            return offset;
        }

        /**
         * Gets the partition key used for message partitioning, if any.
         *
         * @return A partition key for this event data.
         */
        private String getPartitionKey() {
            return partitionKey;
        }

        /**
         * Gets the time this event was enqueued in the Event Hub.
         *
         * @return The time this was enqueued in the service.
         */
        private Instant getEnqueuedTime() {
            return enqueuedTime;
        }

        /**
         * Gets the sequence number in the event stream for this event.
         *
         * @return Sequence number for this event.
         */
        private Long getSequenceNumber() {
            return sequenceNumber;
        }

        // Removes and returns the given key from the map, cast to the caller's expected type.
        @SuppressWarnings("unchecked")
        private <T> T removeSystemProperty(final String key) {
            if (this.containsKey(key)) {
                return (T) (this.remove(key));
            }

            return null;
        }
    }
}
Good catch — fixed.
public EventData(ByteBuffer body) { this.body = Objects.requireNonNull(body, "'body' cannot be null.").array(); this.context = Context.NONE; this.properties = new HashMap<>(); this.systemProperties = new SystemProperties(); }
this.body = Objects.requireNonNull(body, "'body' cannot be null.").array();
/**
 * Creates an event containing the bytes of the {@code body} buffer's backing array,
 * delegating to the {@code byte[]} constructor.
 *
 * @param body The buffer providing the event payload.
 * @throws NullPointerException if {@code body} is {@code null}.
 */
// NOTE(review): ByteBuffer.array() throws for direct/read-only buffers and ignores
// position/limit/arrayOffset — confirm callers only pass full, array-backed buffers.
public EventData(ByteBuffer body) {
    this(Objects.requireNonNull(body, "'body' cannot be null.").array());
}
class EventData {
    /*
     * Property names owned by the Event Hubs service; they are set on a message when it is
     * received and are kept separate from the user-facing application properties.
     */
    static final Set<String> RESERVED_SYSTEM_PROPERTIES;

    // Free-form application properties that travel with the event body.
    private final Map<String, Object> properties;
    // The raw event payload.
    private final byte[] body;
    // Broker-populated metadata; holds only nulls for locally created events.
    private final SystemProperties systemProperties;
    // Key-value context attached to this event (e.g. for tracing).
    private Context context;

    static {
        final Set<String> properties = new HashSet<>();
        properties.add(OFFSET_ANNOTATION_NAME.getValue());
        properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue());
        properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
        properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
        properties.add(PUBLISHER_ANNOTATION_NAME.getValue());

        RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties);
    }

    /**
     * Creates an event containing the {@code body}.
     *
     * @param body The data to set for this event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(byte[] body) {
        this.body = Objects.requireNonNull(body, "'body' cannot be null.");
        this.context = Context.NONE;
        this.properties = new HashMap<>();
        this.systemProperties = new SystemProperties();
    }

    /**
     * Creates an event by encoding the {@code body} using the UTF-8 charset.
     *
     * @param body The string that will be UTF-8 encoded to create an event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(String body) {
        this(body.getBytes(UTF_8));
    }

    /**
     * Creates an event with the given {@code body}, system properties and context. Package-private:
     * used for events received from the service.
     *
     * @param body The data to set for this event.
     * @param systemProperties System properties set by the message broker for this event.
     * @param context A specified key-value pair of type {@link Context}.
     * @throws NullPointerException if {@code body}, {@code systemProperties}, or {@code context} is {@code null}.
     */
    EventData(byte[] body, SystemProperties systemProperties, Context context) {
        this.body = Objects.requireNonNull(body, "'body' cannot be null.");
        this.context = Objects.requireNonNull(context, "'context' cannot be null.");
        this.systemProperties = Objects.requireNonNull(systemProperties, "'systemProperties' cannot be null.");
        this.properties = new HashMap<>();
    }

    /**
     * Gets the key-value {@link Context} holding additional information for this event.
     *
     * @return The {@link Context} object set on the event.
     */
    Context getContext() {
        return context;
    }

    /**
     * Adds a new key-value pair to the existing context on this event.
     *
     * @param key The key for this context object.
     * @param value The value for this context object.
     * @return The updated {@link EventData}.
     * @throws NullPointerException if {@code key} or {@code value} is {@code null}.
     */
    EventData addContext(String key, Object value) {
        Objects.requireNonNull(key, "The 'key' parameter cannot be null.");
        Objects.requireNonNull(value, "The 'value' parameter cannot be null.");

        this.context = context.addData(key, value);
        return this;
    }

    /**
     * The set of free-form event properties that may be used to pass metadata along with the
     * event body during Event Hubs operations, e.g. serialization hints for consumers.
     *
     * @return Application properties associated with this {@link EventData}.
     */
    public Map<String, Object> getProperties() {
        return properties;
    }

    /**
     * Properties populated by the Event Hubs service; only meaningful on a <b>received</b> event.
     *
     * @return The system properties appended by the Event Hubs service. Never {@code null}; for
     *     locally created events the map is empty.
     */
    public Map<String, Object> getSystemProperties() {
        return systemProperties;
    }

    /**
     * Gets the actual payload wrapped by this event.
     *
     * @return A defensive copy of the event's payload bytes.
     */
    public byte[] getBody() {
        // Bug fix: previously returned the internal array directly, letting callers mutate the
        // event's state and break equals/hashCode. Return a copy, consistent with the other
        // EventData implementations in this source.
        return Arrays.copyOf(body, body.length);
    }

    /**
     * Returns the event body as a UTF-8 decoded string.
     *
     * @return UTF-8 decoded string representation of the event data.
     */
    public String getBodyAsString() {
        return new String(body, UTF_8);
    }

    /**
     * Gets the offset of the event when it was received from the associated partition.
     *
     * @return The offset within the partition; {@code null} if the event was not received from
     *     the Event Hubs service.
     */
    public Long getOffset() {
        return systemProperties.getOffset();
    }

    /**
     * Gets the partition key used for message partitioning, if one was set.
     *
     * @return The partition key; {@code null} if the event was not received from the service or
     *     no partition key was set when the event was sent.
     */
    public String getPartitionKey() {
        return systemProperties.getPartitionKey();
    }

    /**
     * Gets the instant, in UTC, when the event was enqueued in the partition.
     *
     * @return The enqueued time; {@code null} if the event was not received from the service.
     */
    public Instant getEnqueuedTime() {
        return systemProperties.getEnqueuedTime();
    }

    /**
     * Gets the sequence number assigned when the event was enqueued in the partition.
     *
     * @return The sequence number; {@code null} if the event was not received from the service.
     */
    public Long getSequenceNumber() {
        return systemProperties.getSequenceNumber();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals(Object o) {
        // Equality is based solely on the payload bytes; metadata is ignored.
        if (this == o) {
            return true;
        }

        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        EventData eventData = (EventData) o;
        return Arrays.equals(body, eventData.body);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        return Arrays.hashCode(body);
    }

    /**
     * A collection of properties populated by the Azure Event Hubs service.
     */
    static class SystemProperties extends HashMap<String, Object> {
        private static final long serialVersionUID = -2827050124966993723L;
        private final Long offset;
        private final String partitionKey;
        private final Instant enqueuedTime;
        private final Long sequenceNumber;

        // Used for locally created (not yet received) events: all metadata is absent.
        SystemProperties() {
            super();
            offset = null;
            partitionKey = null;
            enqueuedTime = null;
            sequenceNumber = null;
        }

        // Builds system properties from the raw annotation map of a received message; well-known
        // entries are removed from the map and exposed through typed getters instead.
        SystemProperties(final Map<String, Object> map) {
            super(map);
            this.partitionKey = removeSystemProperty(PARTITION_KEY_ANNOTATION_NAME.getValue());

            final String offset = removeSystemProperty(OFFSET_ANNOTATION_NAME.getValue());
            if (offset == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "offset: %s should always be in map.", OFFSET_ANNOTATION_NAME.getValue()));
            }
            this.offset = Long.valueOf(offset);

            final Date enqueuedTimeValue = removeSystemProperty(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
            if (enqueuedTimeValue == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "enqueuedTime: %s should always be in map.", ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()));
            }
            this.enqueuedTime = enqueuedTimeValue.toInstant();

            final Long sequenceNumber = removeSystemProperty(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
            if (sequenceNumber == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "sequenceNumber: %s should always be in map.", SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()));
            }
            this.sequenceNumber = sequenceNumber;
        }

        /**
         * Gets the offset within the Event Hubs stream.
         *
         * @return The offset within the Event Hubs stream.
         */
        private Long getOffset() {
            return offset;
        }

        /**
         * Gets the partition key used for message partitioning, if any.
         *
         * @return A partition key for this event data.
         */
        private String getPartitionKey() {
            return partitionKey;
        }

        /**
         * Gets the time this event was enqueued in the Event Hub.
         *
         * @return The time this was enqueued in the service.
         */
        private Instant getEnqueuedTime() {
            return enqueuedTime;
        }

        /**
         * Gets the sequence number in the event stream for this event.
         *
         * @return Sequence number for this event.
         */
        private Long getSequenceNumber() {
            return sequenceNumber;
        }

        // Removes and returns the given key from the map, cast to the caller's expected type.
        @SuppressWarnings("unchecked")
        private <T> T removeSystemProperty(final String key) {
            if (this.containsKey(key)) {
                return (T) (this.remove(key));
            }

            return null;
        }
    }
}
class EventData {
    /*
     * Property names owned by the Event Hubs service; they are set on a message when it is
     * received and are kept separate from the user-facing application properties.
     */
    static final Set<String> RESERVED_SYSTEM_PROPERTIES;

    // Free-form application properties that travel with the event body.
    private final Map<String, Object> properties;
    // The raw event payload.
    private final byte[] body;
    // Broker-populated metadata; holds only nulls for locally created events.
    private final SystemProperties systemProperties;
    // Key-value context attached to this event (e.g. for tracing).
    private Context context;

    static {
        final Set<String> properties = new HashSet<>();
        properties.add(OFFSET_ANNOTATION_NAME.getValue());
        properties.add(PARTITION_KEY_ANNOTATION_NAME.getValue());
        properties.add(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
        properties.add(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
        properties.add(PUBLISHER_ANNOTATION_NAME.getValue());

        RESERVED_SYSTEM_PROPERTIES = Collections.unmodifiableSet(properties);
    }

    /**
     * Creates an event containing the {@code body}.
     *
     * @param body The data to set for this event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(byte[] body) {
        this.body = Objects.requireNonNull(body, "'body' cannot be null.");
        this.context = Context.NONE;
        this.properties = new HashMap<>();
        this.systemProperties = new SystemProperties();
    }

    /**
     * Creates an event by encoding the {@code body} using the UTF-8 charset.
     *
     * @param body The string that will be UTF-8 encoded to create an event.
     * @throws NullPointerException if {@code body} is {@code null}.
     */
    public EventData(String body) {
        this(body.getBytes(UTF_8));
    }

    /**
     * Creates an event with the given {@code body}, system properties and context. Package-private:
     * used for events received from the service.
     *
     * @param body The data to set for this event.
     * @param systemProperties System properties set by the message broker for this event.
     * @param context A specified key-value pair of type {@link Context}.
     * @throws NullPointerException if {@code body}, {@code systemProperties}, or {@code context} is {@code null}.
     */
    EventData(byte[] body, SystemProperties systemProperties, Context context) {
        this.body = Objects.requireNonNull(body, "'body' cannot be null.");
        this.context = Objects.requireNonNull(context, "'context' cannot be null.");
        this.systemProperties = Objects.requireNonNull(systemProperties, "'systemProperties' cannot be null.");
        this.properties = new HashMap<>();
    }

    /**
     * Gets the key-value {@link Context} holding additional information for this event.
     *
     * @return The {@link Context} object set on the event.
     */
    Context getContext() {
        return context;
    }

    /**
     * Adds a new key-value pair to the existing context on this event.
     *
     * @param key The key for this context object.
     * @param value The value for this context object.
     * @return The updated {@link EventData}.
     * @throws NullPointerException if {@code key} or {@code value} is {@code null}.
     */
    EventData addContext(String key, Object value) {
        Objects.requireNonNull(key, "The 'key' parameter cannot be null.");
        Objects.requireNonNull(value, "The 'value' parameter cannot be null.");

        this.context = context.addData(key, value);
        return this;
    }

    /**
     * The set of free-form event properties that may be used to pass metadata along with the
     * event body during Event Hubs operations, e.g. serialization hints for consumers.
     *
     * @return Application properties associated with this {@link EventData}.
     */
    public Map<String, Object> getProperties() {
        return properties;
    }

    /**
     * Properties populated by the Event Hubs service; only meaningful on a <b>received</b> event.
     *
     * @return The system properties appended by the Event Hubs service. Never {@code null}; for
     *     locally created events the map is empty.
     */
    public Map<String, Object> getSystemProperties() {
        return systemProperties;
    }

    /**
     * Gets the actual payload wrapped by this event.
     *
     * @return A defensive copy of the event's payload bytes.
     */
    public byte[] getBody() {
        return Arrays.copyOf(body, body.length);
    }

    /**
     * Returns the event body as a UTF-8 decoded string.
     *
     * @return UTF-8 decoded string representation of the event data.
     */
    public String getBodyAsString() {
        return new String(body, UTF_8);
    }

    /**
     * Gets the offset of the event when it was received from the associated partition.
     *
     * @return The offset within the partition; {@code null} if the event was not received from
     *     the Event Hubs service.
     */
    public Long getOffset() {
        return systemProperties.getOffset();
    }

    /**
     * Gets the partition key used for message partitioning, if one was set.
     *
     * @return The partition key; {@code null} if the event was not received from the service or
     *     no partition key was set when the event was sent.
     */
    public String getPartitionKey() {
        return systemProperties.getPartitionKey();
    }

    /**
     * Gets the instant, in UTC, when the event was enqueued in the partition.
     *
     * @return The enqueued time; {@code null} if the event was not received from the service.
     */
    public Instant getEnqueuedTime() {
        return systemProperties.getEnqueuedTime();
    }

    /**
     * Gets the sequence number assigned when the event was enqueued in the partition.
     *
     * @return The sequence number; {@code null} if the event was not received from the service.
     */
    public Long getSequenceNumber() {
        return systemProperties.getSequenceNumber();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean equals(Object o) {
        // Equality is based solely on the payload bytes; metadata is ignored.
        if (this == o) {
            return true;
        }

        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        EventData eventData = (EventData) o;
        return Arrays.equals(body, eventData.body);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int hashCode() {
        return Arrays.hashCode(body);
    }

    /**
     * A collection of properties populated by the Azure Event Hubs service.
     */
    static class SystemProperties extends HashMap<String, Object> {
        private static final long serialVersionUID = -2827050124966993723L;
        private final Long offset;
        private final String partitionKey;
        private final Instant enqueuedTime;
        private final Long sequenceNumber;

        // Used for locally created (not yet received) events: all metadata is absent.
        SystemProperties() {
            super();
            offset = null;
            partitionKey = null;
            enqueuedTime = null;
            sequenceNumber = null;
        }

        // Builds system properties from the raw annotation map of a received message; well-known
        // entries are removed from the map and exposed through typed getters instead.
        SystemProperties(final Map<String, Object> map) {
            super(map);
            this.partitionKey = removeSystemProperty(PARTITION_KEY_ANNOTATION_NAME.getValue());

            final String offset = removeSystemProperty(OFFSET_ANNOTATION_NAME.getValue());
            if (offset == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "offset: %s should always be in map.", OFFSET_ANNOTATION_NAME.getValue()));
            }
            this.offset = Long.valueOf(offset);

            final Date enqueuedTimeValue = removeSystemProperty(ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue());
            if (enqueuedTimeValue == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "enqueuedTime: %s should always be in map.", ENQUEUED_TIME_UTC_ANNOTATION_NAME.getValue()));
            }
            this.enqueuedTime = enqueuedTimeValue.toInstant();

            final Long sequenceNumber = removeSystemProperty(SEQUENCE_NUMBER_ANNOTATION_NAME.getValue());
            if (sequenceNumber == null) {
                throw new IllegalStateException(String.format(Locale.US,
                    "sequenceNumber: %s should always be in map.", SEQUENCE_NUMBER_ANNOTATION_NAME.getValue()));
            }
            this.sequenceNumber = sequenceNumber;
        }

        /**
         * Gets the offset within the Event Hubs stream.
         *
         * @return The offset within the Event Hubs stream.
         */
        private Long getOffset() {
            return offset;
        }

        /**
         * Gets the partition key used for message partitioning, if any.
         *
         * @return A partition key for this event data.
         */
        private String getPartitionKey() {
            return partitionKey;
        }

        /**
         * Gets the time this event was enqueued in the Event Hub.
         *
         * @return The time this was enqueued in the service.
         */
        private Instant getEnqueuedTime() {
            return enqueuedTime;
        }

        /**
         * Gets the sequence number in the event stream for this event.
         *
         * @return Sequence number for this event.
         */
        private Long getSequenceNumber() {
            return sequenceNumber;
        }

        // Removes and returns the given key from the map, cast to the caller's expected type.
        @SuppressWarnings("unchecked")
        private <T> T removeSystemProperty(final String key) {
            if (this.containsKey(key)) {
                return (T) (this.remove(key));
            }

            return null;
        }
    }
}
nit: Add a comment explaining that the sleep simulates the customer's program doing other work until it is ready to stop the processor.
/**
 * Entry point for the sample: wires a blob-backed checkpoint store into an event processor,
 * runs it for five minutes, then shuts it down.
 *
 * @param args Unused arguments to the sample.
 * @throws Exception if there are any errors while running the sample program.
 */
public static void main(String[] args) throws Exception {
    // Storage container that the checkpoint store uses to persist ownership and checkpoints.
    BlobContainerAsyncClient containerClient = new BlobContainerClientBuilder()
        .connectionString(STORAGE_CONNECTION_STRING)
        .containerName("<< CONTAINER NAME >>")
        .sasToken(SAS_TOKEN_STRING)
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildAsyncClient();

    // Build the processor directly from the builder chain instead of a separate builder variable.
    EventProcessorClient processorClient = new EventProcessorClientBuilder()
        .connectionString(EH_CONNECTION_STRING)
        .consumerGroup("<< CONSUMER GROUP NAME >>")
        .processEvent(PARTITION_PROCESSOR)
        .processError(ERROR_HANDLER)
        .checkpointStore(new BlobCheckpointStore(containerClient))
        .buildEventProcessorClient();

    processorClient.start();

    // Simulate the application doing other work while events are processed in the background.
    TimeUnit.MINUTES.sleep(5);

    processorClient.stop();
}
TimeUnit.MINUTES.sleep(5);
public static void main(String[] args) throws Exception {
    // Storage container that the checkpoint store uses to persist ownership and checkpoints.
    BlobContainerAsyncClient blobContainerAsyncClient = new BlobContainerClientBuilder()
        .connectionString(STORAGE_CONNECTION_STRING)
        .containerName("<< CONTAINER NAME >>")
        .sasToken(SAS_TOKEN_STRING)
        .httpLogOptions(new HttpLogOptions().setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS))
        .buildAsyncClient();

    // Configure the processor with the event/error callbacks and the blob-backed checkpoint store.
    EventProcessorClientBuilder eventProcessorClientBuilder = new EventProcessorClientBuilder()
        .connectionString(EH_CONNECTION_STRING)
        .consumerGroup("<< CONSUMER GROUP NAME >>")
        .processEvent(PARTITION_PROCESSOR)
        .processError(ERROR_HANDLER)
        .checkpointStore(new BlobCheckpointStore(blobContainerAsyncClient));

    EventProcessorClient eventProcessorClient = eventProcessorClientBuilder.buildEventProcessorClient();

    eventProcessorClient.start();

    // Sleep to simulate the customer's program doing other work while events are processed in
    // the background, until it is ready to stop the processor.
    TimeUnit.MINUTES.sleep(5);

    eventProcessorClient.stop();
}
class EventProcessorBlobCheckpointStoreSample {
    private static final String EH_CONNECTION_STRING = "";
    private static final String SAS_TOKEN_STRING = "";
    private static final String STORAGE_CONNECTION_STRING = "";

    /**
     * Handles each received event: logs its partition and sequence number, and checkpoints
     * every event whose sequence number is a multiple of 10.
     */
    public static final Consumer<PartitionEvent> PARTITION_PROCESSOR = partitionEvent -> {
        System.out.printf("Processing event from partition %s with sequence number %d %n",
            partitionEvent.getPartitionContext().getPartitionId(), partitionEvent.getData().getSequenceNumber());

        if (partitionEvent.getData().getSequenceNumber() % 10 == 0) {
            partitionEvent.getPartitionContext().updateCheckpoint(partitionEvent.getData()).subscribe();
        }
    };

    /**
     * Logs errors raised while processing a partition.
     */
    public static final Consumer<EventProcessingErrorContext> ERROR_HANDLER = errorContext -> {
        // Bug fix: printf requires %-style format specifiers; the SLF4J-style "{}" placeholders
        // previously printed literally and the arguments were never interpolated.
        System.out.printf("Error occurred in partition processor for partition %s: %s%n",
            errorContext.getPartitionContext().getPartitionId(), errorContext.getThrowable());
    };
}
class EventProcessorBlobCheckpointStoreSample {
    private static final String EH_CONNECTION_STRING = "";
    private static final String SAS_TOKEN_STRING = "";
    private static final String STORAGE_CONNECTION_STRING = "";

    /**
     * Handles each received event: logs its partition and sequence number, and checkpoints
     * every event whose sequence number is a multiple of 10.
     */
    public static final Consumer<PartitionEvent> PARTITION_PROCESSOR = partitionEvent -> {
        System.out.printf("Processing event from partition %s with sequence number %d %n",
            partitionEvent.getPartitionContext().getPartitionId(), partitionEvent.getData().getSequenceNumber());

        if (partitionEvent.getData().getSequenceNumber() % 10 == 0) {
            partitionEvent.getPartitionContext().updateCheckpoint(partitionEvent.getData()).subscribe();
        }
    };

    /**
     * Logs errors raised while processing a partition.
     */
    // NOTE(review): printf does not understand SLF4J-style "{}" placeholders — the arguments
    // are never interpolated here. Looks like this should use "%s" specifiers; confirm and fix.
    public static final Consumer<EventProcessingErrorContext> ERROR_HANDLER = errorContext -> {
        System.out.printf("Error occurred in partition processor for partition {}, {}",
            errorContext.getPartitionContext().getPartitionId(), errorContext.getThrowable());
    };

    /**
     * The main method to run the sample.
     *
     * @param args Unused arguments to the sample.
     * @throws Exception if there are any errors while running the sample program.
     */
}
Please use the diamond operator; you don't have to repeat the type argument a second time: `ArrayList<Integer> a1 = new ArrayList<>();`
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<List<Integer>> expectedValues = new ArrayList<>(); ArrayList<Integer> a1 = new ArrayList<Integer>(); ArrayList<Integer> a2 = new ArrayList<Integer>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); expectedValues.add(a1); expectedValues.add(a2); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); }
ArrayList<Integer> a1 = new ArrayList<Integer>();
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); Collection<List<List<Integer>>> expectedValues = new ArrayList<>(); List<List<Integer>> lists = new ArrayList<>(); List<Integer> a1 = new ArrayList<>(); ArrayList<Integer> a2 = new ArrayList<>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); lists.add(a1); lists.add(a2); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); System.out.println("fetchedResults = " + fetchedResults); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by 
last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = 
createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); 
testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); List<Tuple> assertTuples = createdDocuments.stream() .map(cosmosItemProperties -> tuple(cosmosItemProperties.getId(), cosmosItemProperties.get("mypk"), cosmosItemProperties.get("prop"), cosmosItemProperties.get("boolProp"))) .collect(Collectors.toList()); assertThat(fetchedResults).extracting(TestObject::getId, TestObject::getMypk, TestObject::getProp, TestObject::getBoolProp) .containsAll(assertTuples); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = 
getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void 
setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); 
options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
I don't see any assertion in the test. Are we asserting elsewhere?
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<List<Integer>> expectedValues = new ArrayList<>(); ArrayList<Integer> a1 = new ArrayList<Integer>(); ArrayList<Integer> a2 = new ArrayList<Integer>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); expectedValues.add(a1); expectedValues.add(a2); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); }
queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast();
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); Collection<List<List<Integer>>> expectedValues = new ArrayList<>(); List<List<Integer>> lists = new ArrayList<>(); List<Integer> a1 = new ArrayList<>(); ArrayList<Integer> a2 = new ArrayList<>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); lists.add(a1); lists.add(a2); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); System.out.println("fetchedResults = " + fetchedResults); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by 
last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = 
createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); 
testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); List<Tuple> assertTuples = createdDocuments.stream() .map(cosmosItemProperties -> tuple(cosmosItemProperties.getId(), cosmosItemProperties.get("mypk"), cosmosItemProperties.get("prop"), cosmosItemProperties.get("boolProp"))) .collect(Collectors.toList()); assertThat(fetchedResults).extracting(TestObject::getId, TestObject::getMypk, TestObject::getProp, TestObject::getBoolProp) .containsAll(assertTuples); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = 
getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void 
setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); 
options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
I am not seeing any assertion in the test. Does test verify that the result is correct?
public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); System.out.println("fetchedResults = " + fetchedResults); }
System.out.println("fetchedResults = " + fetchedResults);
public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); List<Tuple> assertTuples = createdDocuments.stream() .map(cosmosItemProperties -> tuple(cosmosItemProperties.getId(), cosmosItemProperties.get("mypk"), cosmosItemProperties.get("prop"), cosmosItemProperties.get("boolProp"))) .collect(Collectors.toList()); assertThat(fetchedResults).extracting(TestObject::getId, TestObject::getMypk, TestObject::getProp, TestObject::getBoolProp) .containsAll(assertTuples); }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<List<Integer>> expectedValues = new ArrayList<>(); ArrayList<Integer> a1 = new ArrayList<Integer>(); ArrayList<Integer> a2 = new ArrayList<Integer>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); expectedValues.add(a1); expectedValues.add(a2); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); } @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch 
(Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() 
throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); 
TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); Collection<List<List<Integer>>> expectedValues = new ArrayList<>(); List<List<Integer>> lists = new ArrayList<>(); List<Integer> a1 = new ArrayList<>(); ArrayList<Integer> a2 = new ArrayList<>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); lists.add(a1); lists.add(a2); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = 
clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public 
List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); 
options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
Done
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<List<Integer>> expectedValues = new ArrayList<>(); ArrayList<Integer> a1 = new ArrayList<Integer>(); ArrayList<Integer> a2 = new ArrayList<Integer>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); expectedValues.add(a1); expectedValues.add(a2); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); }
ArrayList<Integer> a1 = new ArrayList<Integer>();
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); Collection<List<List<Integer>>> expectedValues = new ArrayList<>(); List<List<Integer>> lists = new ArrayList<>(); List<Integer> a1 = new ArrayList<>(); ArrayList<Integer> a2 = new ArrayList<>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); lists.add(a1); lists.add(a2); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); System.out.println("fetchedResults = " + fetchedResults); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by 
last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = 
createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); 
testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); List<Tuple> assertTuples = createdDocuments.stream() .map(cosmosItemProperties -> tuple(cosmosItemProperties.getId(), cosmosItemProperties.get("mypk"), cosmosItemProperties.get("prop"), cosmosItemProperties.get("boolProp"))) .collect(Collectors.toList()); assertThat(fetchedResults).extracting(TestObject::getId, TestObject::getMypk, TestObject::getProp, TestObject::getBoolProp) .containsAll(assertTuples); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = 
getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void 
setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); 
options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
Added assertions
public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); System.out.println("fetchedResults = " + fetchedResults); }
System.out.println("fetchedResults = " + fetchedResults);
public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); List<Tuple> assertTuples = createdDocuments.stream() .map(cosmosItemProperties -> tuple(cosmosItemProperties.getId(), cosmosItemProperties.get("mypk"), cosmosItemProperties.get("prop"), cosmosItemProperties.get("boolProp"))) .collect(Collectors.toList()); assertThat(fetchedResults).extracting(TestObject::getId, TestObject::getMypk, TestObject::getProp, TestObject::getBoolProp) .containsAll(assertTuples); }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<List<Integer>> expectedValues = new ArrayList<>(); ArrayList<Integer> a1 = new ArrayList<Integer>(); ArrayList<Integer> a2 = new ArrayList<Integer>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); expectedValues.add(a1); expectedValues.add(a2); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); } @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch 
(Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() 
throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); 
TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); Collection<List<List<Integer>>> expectedValues = new ArrayList<>(); List<List<Integer>> lists = new ArrayList<>(); List<Integer> a1 = new ArrayList<>(); ArrayList<Integer> a2 = new ArrayList<>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); lists.add(a1); lists.add(a2); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = 
clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public 
List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); 
options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
Added assertions
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<List<Integer>> expectedValues = new ArrayList<>(); ArrayList<Integer> a1 = new ArrayList<Integer>(); ArrayList<Integer> a2 = new ArrayList<Integer>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); expectedValues.add(a1); expectedValues.add(a2); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); }
queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast();
public void queryDocumentsArrayValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); Collection<List<List<Integer>>> expectedValues = new ArrayList<>(); List<List<Integer>> lists = new ArrayList<>(); List<Integer> a1 = new ArrayList<>(); ArrayList<Integer> a2 = new ArrayList<>(); a1.add(6519456); a1.add(1471916863); a2.add(2498434); a2.add(1455671440); lists.add(a1); lists.add(a2); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); expectedValues.add(lists); String query = "Select top 2 value c.sgmts from c"; Flux<FeedResponse<List>> queryObservable = createdCollection.queryItems(query, options, List.class); List<List> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); System.out.println("fetchedResults = " + fetchedResults); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by 
last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = 
createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); 
testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
class ParallelDocumentQueryTest extends TestSuiteBase { private CosmosAsyncDatabase createdDatabase; private CosmosAsyncContainer createdCollection; private List<CosmosItemProperties> createdDocuments; private CosmosAsyncClient client; public String getCollectionLink() { return TestUtils.getCollectionNameLink(createdDatabase.getId(), createdCollection.getId()); } @Factory(dataProvider = "clientBuildersWithDirect") public ParallelDocumentQueryTest(CosmosClientBuilder clientBuilder) { super(clientBuilder); } @DataProvider(name = "queryMetricsArgProvider") public Object[][] queryMetricsArgProvider() { return new Object[][]{ {true}, {false}, }; } @Test(groups = { "simple" }, timeOut = TIMEOUT, dataProvider = "queryMetricsArgProvider") public void queryDocuments(boolean qmEnabled) { String query = "SELECT * from c where c.prop = 99"; FeedOptions options = new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(qmEnabled); options.setMaxDegreeOfParallelism(2); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments.stream().filter(d -> 99 == d.getInt("prop") ).collect(Collectors.toList()); assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .totalSize(expectedDocs.size()) .exactlyContainsInAnyOrder(expectedDocs.stream().map(d -> d.getResourceId()).collect(Collectors.toList())) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0).build()) .hasValidQueryMetrics(qmEnabled) .build(); validateQuerySuccess(queryObservable, validator, TIMEOUT); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void queryMetricEquality() throws Exception { String query = "SELECT * from c where c.prop = 99"; FeedOptions options 
= new FeedOptions(); options.maxItemCount(5); options.setEnableCrossPartitionQuery(true); options.populateQueryMetrics(true); options.setMaxDegreeOfParallelism(0); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList1 = queryObservable.collectList().block(); options.setMaxDegreeOfParallelism(4); Flux<FeedResponse<CosmosItemProperties>> threadedQueryObs = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<FeedResponse<CosmosItemProperties>> resultList2 = threadedQueryObs.collectList().block(); assertThat(resultList1.size()).isEqualTo(resultList2.size()); for(int i = 0; i < resultList1.size(); i++){ compareQueryMetrics(BridgeInternal.queryMetricsFromFeedResponse(resultList1.get(i)), BridgeInternal.queryMetricsFromFeedResponse(resultList2.get(i))); } } private void compareQueryMetrics(Map<String, QueryMetrics> qm1, Map<String, QueryMetrics> qm2) { assertThat(qm1.keySet().size()).isEqualTo(qm2.keySet().size()); QueryMetrics queryMetrics1 = BridgeInternal.createQueryMetricsFromCollection(qm1.values()); QueryMetrics queryMetrics2 = BridgeInternal.createQueryMetricsFromCollection(qm2.values()); assertThat(queryMetrics1.getRetrievedDocumentSize()).isEqualTo(queryMetrics2.getRetrievedDocumentSize()); assertThat(queryMetrics1.getRetrievedDocumentCount()).isEqualTo(queryMetrics2.getRetrievedDocumentCount()); assertThat(queryMetrics1.getIndexHitDocumentCount()).isEqualTo(queryMetrics2.getIndexHitDocumentCount()); assertThat(queryMetrics1.getOutputDocumentCount()).isEqualTo(queryMetrics2.getOutputDocumentCount()); assertThat(queryMetrics1.getOutputDocumentSize()).isEqualTo(queryMetrics2.getOutputDocumentSize()); assertThat(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()) .isEqualTo(BridgeInternal.getClientSideMetrics(queryMetrics1).getRequestCharge()); } @Test(groups = { "simple" }, timeOut = 
TIMEOUT) public void queryDocuments_NoResults() { String query = "SELECT * from root r where r.id = '2'"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator.Builder<CosmosItemProperties>() .containsExactly(new ArrayList<>()) .numberOfPagesIsGreaterThanOrEqualTo(1) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .pageSizeIsLessThanOrEqualTo(0) .requestChargeGreaterThanOrEqualTo(1.0).build()) .build(); validateQuerySuccess(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void queryDocumentsWithPageSize() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); int pageSize = 3; options.maxItemCount(pageSize); options.setMaxDegreeOfParallelism(-1); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); List<CosmosItemProperties> expectedDocs = createdDocuments; assertThat(expectedDocs).isNotEmpty(); FeedResponseListValidator<CosmosItemProperties> validator = new FeedResponseListValidator .Builder<CosmosItemProperties>() .exactlyContainsInAnyOrder(expectedDocs .stream() .map(d -> d.getResourceId()) .collect(Collectors.toList())) .numberOfPagesIsGreaterThanOrEqualTo((expectedDocs.size() + 1) / 3) .allPagesSatisfy(new FeedResponseValidator.Builder<CosmosItemProperties>() .requestChargeGreaterThanOrEqualTo(1.0) .pageSizeIsLessThanOrEqualTo(pageSize) .build()) .build(); validateQuerySuccess(queryObservable, validator, 2 * subscriberValidationTimeout); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void invalidQuerySyntax() { String query = "I am an invalid query"; FeedOptions options = new 
FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .notNullActivityId() .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void crossPartitionQueryNotEnabled() { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder() .instanceOf(CosmosClientException.class) .statusCode(400) .build(); validateQueryFailure(queryObservable, validator); } @Test(groups = { "simple" }, timeOut = 2 * TIMEOUT) public void partitionKeyRangeId() { int sum = 0; for (String partitionKeyRangeId : CosmosBridgeInternal.getAsyncDocumentClient(client).readPartitionKeyRanges(getCollectionLink(), null) .flatMap(p -> Flux.fromIterable(p.getResults())) .map(Resource::getId).collectList().single().block()) { String query = "SELECT * from root"; FeedOptions options = new FeedOptions(); partitionKeyRangeIdInternal(options, partitionKeyRangeId); int queryResultCount = createdCollection.queryItems(query, options, CosmosItemProperties.class) .flatMap(p -> Flux.fromIterable(p.getResults())) .collectList().block().size(); sum += queryResultCount; } assertThat(sum).isEqualTo(createdDocuments.size()); } @Test(groups = { "simple" }, timeOut = TIMEOUT) public void compositeContinuationTokenRoundTrip() throws Exception { { CompositeContinuationToken compositeContinuationToken = new CompositeContinuationToken("asdf", new Range<String>("A", "D", false, true)); String serialized = compositeContinuationToken.toString(); ValueHolder<CompositeContinuationToken> outCompositeContinuationToken 
= new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse(serialized, outCompositeContinuationToken); assertThat(succeeed).isTrue(); CompositeContinuationToken deserialized = outCompositeContinuationToken.v; String token = deserialized.getToken(); Range<String> range = deserialized.getRange(); assertThat(token).isEqualTo("asdf"); assertThat(range.getMin()).isEqualTo("A"); assertThat(range.getMax()).isEqualTo("D"); assertThat(range.isMinInclusive()).isEqualTo(false); assertThat(range.isMaxInclusive()).isEqualTo(true); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"property\" : \"not a valid composite continuation token\"}", outCompositeContinuationToken); assertThat(succeeed).isFalse(); } { ValueHolder<CompositeContinuationToken> outCompositeContinuationToken = new ValueHolder<CompositeContinuationToken>(); boolean succeeed = CompositeContinuationToken.tryParse("{\"token\":\"-RID:tZFQAImzNLQLAAAAAAAAAA== assertThat(succeeed).isFalse(); } } @Test(groups = { "non-emulator" }, timeOut = TIMEOUT * 10) public void queryDocumentsWithCompositeContinuationTokens() throws Exception { String query = "SELECT * FROM c"; List<CosmosItemProperties> expectedDocs = new ArrayList<>(createdDocuments); assertThat(expectedDocs).isNotEmpty(); this.queryWithContinuationTokensAndPageSizes(query, new int[] {1, 10, 100}, expectedDocs); } @Test(groups = { "simple" }) public void queryDocumentsStringValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<String> expectedValues = createdDocuments.stream().map(d -> d.getId()).collect(Collectors.toList()); String query = "Select value c.id from c"; Flux<FeedResponse<String>> queryObservable = createdCollection.queryItems(query, options, String.class); List<String> fetchedResults = new 
ArrayList<>(); queryObservable.map(stringFeedResponse -> fetchedResults.addAll(stringFeedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) @Test(groups = { "simple" }) public void queryDocumentsIntegerValue(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); List<Integer> expectedValues = createdDocuments.stream().map(d -> d.getInt("prop")).collect(Collectors.toList()); String query = "Select value c.prop from c"; Flux<FeedResponse<Integer>> queryObservable = createdCollection.queryItems(query, options, Integer.class); List<Integer> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); assertThat(fetchedResults).containsAll(expectedValues); } @Test(groups = { "simple" }) public void queryDocumentsPojo(){ FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); String query = "Select * from c"; Flux<FeedResponse<TestObject>> queryObservable = createdCollection.queryItems(query, options, TestObject.class); List<TestObject> fetchedResults = new ArrayList<>(); queryObservable.map(feedResponse -> fetchedResults.addAll(feedResponse.getResults())).blockLast(); List<Tuple> assertTuples = createdDocuments.stream() .map(cosmosItemProperties -> tuple(cosmosItemProperties.getId(), cosmosItemProperties.get("mypk"), cosmosItemProperties.get("prop"), cosmosItemProperties.get("boolProp"))) .collect(Collectors.toList()); assertThat(fetchedResults).extracting(TestObject::getId, TestObject::getMypk, TestObject::getProp, TestObject::getBoolProp) .containsAll(assertTuples); } @BeforeClass(groups = { "simple", "non-emulator" }, timeOut = 4 * SETUP_TIMEOUT) public void before_ParallelDocumentQueryTest() { client = clientBuilder().buildAsyncClient(); createdDatabase = 
getSharedCosmosDatabase(client); createdCollection = getSharedMultiPartitionCosmosContainer(client); try { truncateCollection(createdCollection); } catch (Throwable firstChanceException) { try { truncateCollection(createdCollection); } catch (Throwable lastChanceException) { String message = Strings.lenientFormat("container %s truncation failed due to first chance %s followed by last chance %s", createdCollection, firstChanceException, lastChanceException); logger.error(message); fail(message, lastChanceException); } } List<CosmosItemProperties> docDefList = new ArrayList<>(); for (int i = 0; i < 13; i++) { docDefList.add(getDocumentDefinition(i)); } for (int i = 0; i < 21; i++) { docDefList.add(getDocumentDefinition(99)); } createdDocuments = bulkInsertBlocking(createdCollection, docDefList); waitIfNeededForReplicasToCatchUp(clientBuilder()); } @AfterClass(groups = { "simple", "non-emulator" }, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(client); } private static CosmosItemProperties getDocumentDefinition(int cnt) { String uuid = UUID.randomUUID().toString(); boolean boolVal = cnt % 2 == 0; CosmosItemProperties doc = new CosmosItemProperties(String.format("{ " + "\"id\": \"%s\", " + "\"prop\" : %d, " + "\"boolProp\" : %b, " + "\"mypk\": \"%s\", " + "\"sgmts\": [[6519456, 1471916863], [2498434, 1455671440]]" + "}" , uuid, cnt, boolVal, uuid)); return doc; } static class TestObject{ String id; int prop; Boolean boolProp; String mypk; List<List<Integer>> sgmts; public String getId() { return id; } public void setId(String id) { this.id = id; } public Integer getProp() { return prop; } public void setProp(Integer prop) { this.prop = prop; } public Boolean getBoolProp() { return boolProp; } public void setBoolProp(Boolean boolProp) { this.boolProp = boolProp; } public String getMypk() { return mypk; } public void setMypk(String mypk) { this.mypk = mypk; } public List<List<Integer>> getSgmts() { return sgmts; } public void 
setSgmts(List<List<Integer>> sgmts) { this.sgmts = sgmts; } } @Test(groups = { "simple" }, timeOut = TIMEOUT, enabled = false) public void invalidQuerySytax() throws Exception { String query = "I am an invalid query"; FeedOptions options = new FeedOptions(); options.setEnableCrossPartitionQuery(true); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); FailureValidator validator = new FailureValidator.Builder().instanceOf(CosmosClientException.class) .statusCode(400).notNullActivityId().build(); validateQueryFailure(queryObservable, validator); } public CosmosItemProperties createDocument(CosmosAsyncContainer cosmosContainer, int cnt) throws CosmosClientException { CosmosItemProperties docDefinition = getDocumentDefinition(cnt); return cosmosContainer.createItem(docDefinition).block().getProperties(); } private void queryWithContinuationTokensAndPageSizes(String query, int[] pageSizes, List<CosmosItemProperties> expectedDocs) { for (int pageSize : pageSizes) { List<CosmosItemProperties> receivedDocuments = this.queryWithContinuationTokens(query, pageSize); List<String> actualIds = new ArrayList<String>(); for (CosmosItemProperties document : receivedDocuments) { actualIds.add(document.getResourceId()); } List<String> expectedIds = new ArrayList<String>(); for (CosmosItemProperties document : expectedDocs) { expectedIds.add(document.getResourceId()); } assertThat(actualIds).containsOnlyElementsOf(expectedIds); } } private List<CosmosItemProperties> queryWithContinuationTokens(String query, int pageSize) { String requestContinuation = null; List<String> continuationTokens = new ArrayList<String>(); List<CosmosItemProperties> receivedDocuments = new ArrayList<CosmosItemProperties>(); do { FeedOptions options = new FeedOptions(); options.maxItemCount(pageSize); options.setEnableCrossPartitionQuery(true); options.setMaxDegreeOfParallelism(2); 
options.requestContinuation(requestContinuation); Flux<FeedResponse<CosmosItemProperties>> queryObservable = createdCollection.queryItems(query, options, CosmosItemProperties.class); TestSubscriber<FeedResponse<CosmosItemProperties>> testSubscriber = new TestSubscriber<>(); queryObservable.subscribe(testSubscriber); testSubscriber.awaitTerminalEvent(TIMEOUT, TimeUnit.MILLISECONDS); testSubscriber.assertNoErrors(); testSubscriber.assertComplete(); FeedResponse<CosmosItemProperties> firstPage = (FeedResponse<CosmosItemProperties>) testSubscriber.getEvents().get(0).get(0); requestContinuation = firstPage.getContinuationToken(); receivedDocuments.addAll(firstPage.getResults()); continuationTokens.add(requestContinuation); } while (requestContinuation != null); return receivedDocuments; } }
@alzimmermsft - I've been thinking about how we can achieve the correct behavior here, i.e. avoiding the 2 extra network calls the consumer didn't ask us to perform; the new base type `ContinuablePagedFluxCore` will take care of this. I've updated the test to validate the expected next call count (i.e. 0). Please take a look and let me know if you have any concerns.
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
assertEquals(0, pagedFlux.getNextPageRetrievals());
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
I have no concerns; this is a great side effect of this change!
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
assertEquals(0, pagedFlux.getNextPageRetrievals());
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
Cool, thanks for the review, Alan.
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
assertEquals(0, pagedFlux.getNextPageRetrievals());
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
and let's remove this FAQ entry once the PR is in: https://github.com/Azure/azure-sdk-for-java/wiki/Frequently-Asked-Questions#-why-does-synchronous-paged-iterator-fetches-2-pages-eagerly-when-iterating-by-page
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
assertEquals(0, pagedFlux.getNextPageRetrievals());
public void streamFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.streamByPage().limit(1).collect(Collectors.toList()).get(0)); assertEquals(0, pagedFlux.getNextPageRetrievals()); }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
class PagedIterableTest { private List<PagedResponse<Integer>> pagedResponses; private List<PagedResponse<String>> pagedStringResponses; private HttpHeaders httpHeaders = new HttpHeaders().put("header1", "value1").put("header2", "value2"); private HttpRequest httpRequest = new HttpRequest(HttpMethod.GET, "http: private String deserializedHeaders = "header1,value1,header2,value2"; @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = pagedIterable.streamByPage().collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPage(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<Integer>> pages = new ArrayList<>(); pagedIterable.iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); assertEquals(pagedResponses, pages); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = pagedIterable.stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByT(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<Integer> values = new ArrayList<>(); 
pagedIterable.iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = pagedIterable.mapPage(String::valueOf).streamByPage() .collect(Collectors.toList()); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByPageMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<PagedResponse<String>> pages = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterableByPage().iterator().forEachRemaining(pages::add); assertEquals(numberOfPages, pages.size()); for (int i = 0; i < numberOfPages; i++) { assertEquals(pagedStringResponses.get(i).getValue(), pages.get(i).getValue()); } } @ParameterizedTest @ValueSource(ints = {0, 5}) public void streamByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); List<String> values = pagedIterable.mapPage(String::valueOf).stream().collect(Collectors.toList()); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @ParameterizedTest @ValueSource(ints = {0, 5}) public void iterateByTMap(int numberOfPages) { PagedFlux<Integer> pagedFlux = getIntegerPagedFlux(numberOfPages); PagedIterable<Integer> pagedIterable 
= new PagedIterable<>(pagedFlux); List<String> values = new ArrayList<>(); pagedIterable.mapPage(String::valueOf).iterator().forEachRemaining(values::add); assertEquals(numberOfPages * 3, values.size()); assertEquals(Stream.iterate(0, i -> i + 1).limit(numberOfPages * 3).map(String::valueOf) .collect(Collectors.toList()), values); } @Test @Test public void iterateFirstPage() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); assertEquals(pagedResponses.get(0), pagedIterable.iterableByPage().iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } @Test public void streamFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.stream().limit(1).collect(Collectors.toList()).get(0)); } @Test public void iterateFirstValue() { TestPagedFlux<Integer> pagedFlux = getTestPagedFlux(5); PagedIterable<Integer> pagedIterable = new PagedIterable<>(pagedFlux); Integer firstValue = pagedResponses.get(0).getValue().get(0); assertEquals(firstValue, pagedIterable.iterator().next()); assertEquals(0, pagedFlux.getNextPageRetrievals()); } private PagedFlux<Integer> getIntegerPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new PagedFlux<>(() -> pagedResponses.isEmpty() ? Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private TestPagedFlux<Integer> getTestPagedFlux(int numberOfPages) { createPagedResponse(numberOfPages); return new TestPagedFlux<>(() -> pagedResponses.isEmpty() ? 
Mono.empty() : Mono.just(pagedResponses.get(0)), continuationToken -> getNextPage(continuationToken, pagedResponses)); } private void createPagedResponse(int numberOfPages) { pagedResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getItems, i)) .collect(Collectors.toList()); pagedStringResponses = IntStream.range(0, numberOfPages) .boxed() .map(i -> createPagedResponse(httpRequest, httpHeaders, deserializedHeaders, numberOfPages, this::getStringItems, i)) .collect(Collectors.toList()); } private <T> PagedResponseBase<String, T> createPagedResponse(HttpRequest httpRequest, HttpHeaders headers, String deserializedHeaders, int numberOfPages, Function<Integer, List<T>> valueSupplier, int i) { return new PagedResponseBase<>(httpRequest, 200, headers, valueSupplier.apply(i), (i < numberOfPages - 1) ? String.valueOf(i + 1) : null, deserializedHeaders); } private Mono<PagedResponse<Integer>> getNextPage(String continuationToken, List<PagedResponse<Integer>> pagedResponses) { if (continuationToken == null || continuationToken.isEmpty()) { return Mono.empty(); } return Mono.just(pagedResponses.get(Integer.parseInt(continuationToken))); } private List<Integer> getItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().collect(Collectors.toList()); } private List<String> getStringItems(int i) { return IntStream.range(i * 3, i * 3 + 3).boxed().map(String::valueOf).collect(Collectors.toList()); } /* * Test class used to verify that paged iterable will lazily request next pages. 
*/ private static class TestPagedFlux<T> extends PagedFlux<T> { private int nextPageRetrievals = 0; TestPagedFlux(Supplier<Mono<PagedResponse<T>>> firstPageRetriever, Function<String, Mono<PagedResponse<T>>> nextPageRetriever) { super(firstPageRetriever, nextPageRetriever); } @Override public Flux<PagedResponse<T>> byPage(String continuationToken) { nextPageRetrievals++; return super.byPage(continuationToken); } /* * Returns the number of times another page has been retrieved. */ int getNextPageRetrievals() { return nextPageRetrievals; } } }
This can be simplified to: `this.isDone = token == null;`
public void customContinuationTokenSnippet() { class ContinuationState<C> { private C lastContinuationToken; private boolean isDone; ContinuationState(C token) { this.lastContinuationToken = token; } void setLastContinuationToken(C token) { this.isDone = token == null ? true : false; this.lastContinuationToken = token; } C getLastContinuationToken() { return this.lastContinuationToken; } boolean isDone() { return this.isDone; } } class FileContinuationToken { public int getNextLinkId() { return 0; } } class File { } class FilePage implements ContinuablePage<FileContinuationToken, File> { @Override public IterableStream<File> getElements() { return null; } @Override public FileContinuationToken getContinuationToken() { return null; } } class FileShareServiceClient { Flux<FilePage> getFilePages(FileContinuationToken token) { return null; } } FileShareServiceClient client = null; Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider = new Supplier<>() { @Override public PageRetriever<FileContinuationToken, FilePage> get() { return (continuationToken, pageSize) -> client.getFilePages(continuationToken); } }; class FilePagedFlux extends ContinuablePagedFluxCore<FileContinuationToken, File, FilePage> { FilePagedFlux(Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider) { super(pageRetrieverProvider); } } FilePagedFlux filePagedFlux = new FilePagedFlux(pageRetrieverProvider); }
this.isDone = token == null ? true : false;
public void customContinuationTokenSnippet() { class ContinuationState<C> { private C lastContinuationToken; private boolean isDone; ContinuationState(C token) { this.lastContinuationToken = token; } void setLastContinuationToken(C token) { this.isDone = token == null; this.lastContinuationToken = token; } C getLastContinuationToken() { return this.lastContinuationToken; } boolean isDone() { return this.isDone; } } class FileContinuationToken { public int getNextLinkId() { return 0; } } class File { } class FilePage implements ContinuablePage<FileContinuationToken, File> { @Override public IterableStream<File> getElements() { return null; } @Override public FileContinuationToken getContinuationToken() { return null; } } class FileShareServiceClient { Flux<FilePage> getFilePages(FileContinuationToken token) { return null; } } FileShareServiceClient client = null; Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider = new Supplier<PageRetriever<FileContinuationToken, FilePage>>() { @Override public PageRetriever<FileContinuationToken, FilePage> get() { return (continuationToken, pageSize) -> client.getFilePages(continuationToken); } }; class FilePagedFlux extends ContinuablePagedFluxCore<FileContinuationToken, File, FilePage> { FilePagedFlux(Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider) { super(pageRetrieverProvider); } } FilePagedFlux filePagedFlux = new FilePagedFlux(pageRetrieverProvider); }
class PagedFluxCoreJavaDocCodeSnippets { /** * Code snippets for extending from {@link ContinuablePagedFluxCore} and enabling custom continuation token. */ }
class PagedFluxCoreJavaDocCodeSnippets { /** * Code snippets for extending from {@link ContinuablePagedFluxCore} and enabling custom continuation token. */ }
yes, updated the PR.
public void customContinuationTokenSnippet() { class ContinuationState<C> { private C lastContinuationToken; private boolean isDone; ContinuationState(C token) { this.lastContinuationToken = token; } void setLastContinuationToken(C token) { this.isDone = token == null ? true : false; this.lastContinuationToken = token; } C getLastContinuationToken() { return this.lastContinuationToken; } boolean isDone() { return this.isDone; } } class FileContinuationToken { public int getNextLinkId() { return 0; } } class File { } class FilePage implements ContinuablePage<FileContinuationToken, File> { @Override public IterableStream<File> getElements() { return null; } @Override public FileContinuationToken getContinuationToken() { return null; } } class FileShareServiceClient { Flux<FilePage> getFilePages(FileContinuationToken token) { return null; } } FileShareServiceClient client = null; Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider = new Supplier<>() { @Override public PageRetriever<FileContinuationToken, FilePage> get() { return (continuationToken, pageSize) -> client.getFilePages(continuationToken); } }; class FilePagedFlux extends ContinuablePagedFluxCore<FileContinuationToken, File, FilePage> { FilePagedFlux(Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider) { super(pageRetrieverProvider); } } FilePagedFlux filePagedFlux = new FilePagedFlux(pageRetrieverProvider); }
this.isDone = token == null ? true : false;
public void customContinuationTokenSnippet() { class ContinuationState<C> { private C lastContinuationToken; private boolean isDone; ContinuationState(C token) { this.lastContinuationToken = token; } void setLastContinuationToken(C token) { this.isDone = token == null; this.lastContinuationToken = token; } C getLastContinuationToken() { return this.lastContinuationToken; } boolean isDone() { return this.isDone; } } class FileContinuationToken { public int getNextLinkId() { return 0; } } class File { } class FilePage implements ContinuablePage<FileContinuationToken, File> { @Override public IterableStream<File> getElements() { return null; } @Override public FileContinuationToken getContinuationToken() { return null; } } class FileShareServiceClient { Flux<FilePage> getFilePages(FileContinuationToken token) { return null; } } FileShareServiceClient client = null; Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider = new Supplier<PageRetriever<FileContinuationToken, FilePage>>() { @Override public PageRetriever<FileContinuationToken, FilePage> get() { return (continuationToken, pageSize) -> client.getFilePages(continuationToken); } }; class FilePagedFlux extends ContinuablePagedFluxCore<FileContinuationToken, File, FilePage> { FilePagedFlux(Supplier<PageRetriever<FileContinuationToken, FilePage>> pageRetrieverProvider) { super(pageRetrieverProvider); } } FilePagedFlux filePagedFlux = new FilePagedFlux(pageRetrieverProvider); }
class PagedFluxCoreJavaDocCodeSnippets { /** * Code snippets for extending from {@link ContinuablePagedFluxCore} and enabling custom continuation token. */ }
class PagedFluxCoreJavaDocCodeSnippets { /** * Code snippets for extending from {@link ContinuablePagedFluxCore} and enabling custom continuation token. */ }
Could introduce `IterableStream.of(List)` or similar to avoid null checks in these cases.
public IterableStream<ConfigurationSetting> getElements() { return items == null ? IterableStream.empty() : new IterableStream<>(items); }
return items == null ? IterableStream.empty() : new IterableStream<>(items);
public IterableStream<ConfigurationSetting> getElements() { return IterableStream.of(items); }
class ConfigurationSettingPage implements Page<ConfigurationSetting> { @JsonProperty("@nextLink") private String continuationToken; @JsonProperty("items") private List<ConfigurationSetting> items; /** * Gets the link to the next page. * * @return The link to the next page or {@code null} if there are no more resources to fetch. */ @Override public String getContinuationToken() { return this.continuationToken; } /** * Gets the iterable stream of {@link ConfigurationSetting ConfigurationSettings} on this page. * * @return The iterable stream of {@link ConfigurationSetting ConfigurationSettings}. */ @Override }
class ConfigurationSettingPage implements Page<ConfigurationSetting> { @JsonProperty("@nextLink") private String continuationToken; @JsonProperty("items") private List<ConfigurationSetting> items; /** * Gets the link to the next page. * * @return The link to the next page or {@code null} if there are no more resources to fetch. */ @Override public String getContinuationToken() { return this.continuationToken; } /** * Gets the iterable stream of {@link ConfigurationSetting ConfigurationSettings} on this page. * * @return The iterable stream of {@link ConfigurationSetting ConfigurationSettings}. */ @Override }
added `IterableStream.of(Iterable<T>)` factory method: if parameter to this factory is `null` then it return cached static empty stream otherwise it create a new `IterableStream` from the parameter.
public IterableStream<ConfigurationSetting> getElements() { return items == null ? IterableStream.empty() : new IterableStream<>(items); }
return items == null ? IterableStream.empty() : new IterableStream<>(items);
public IterableStream<ConfigurationSetting> getElements() { return IterableStream.of(items); }
class ConfigurationSettingPage implements Page<ConfigurationSetting> { @JsonProperty("@nextLink") private String continuationToken; @JsonProperty("items") private List<ConfigurationSetting> items; /** * Gets the link to the next page. * * @return The link to the next page or {@code null} if there are no more resources to fetch. */ @Override public String getContinuationToken() { return this.continuationToken; } /** * Gets the iterable stream of {@link ConfigurationSetting ConfigurationSettings} on this page. * * @return The iterable stream of {@link ConfigurationSetting ConfigurationSettings}. */ @Override }
class ConfigurationSettingPage implements Page<ConfigurationSetting> { @JsonProperty("@nextLink") private String continuationToken; @JsonProperty("items") private List<ConfigurationSetting> items; /** * Gets the link to the next page. * * @return The link to the next page or {@code null} if there are no more resources to fetch. */ @Override public String getContinuationToken() { return this.continuationToken; } /** * Gets the iterable stream of {@link ConfigurationSetting ConfigurationSettings} on this page. * * @return The iterable stream of {@link ConfigurationSetting ConfigurationSettings}. */ @Override }
Use logger.logAndThrow. Same to other places.
public static void httpsValidation(Object objectToCheck, String objectName, String endpoint, ClientLogger logger) { if (objectToCheck != null && !parseEndpoint(endpoint, logger).getScheme().equals(Constants.HTTPS)) { throw new IllegalArgumentException("Using a(n) " + objectName + " requires https"); } }
throw new IllegalArgumentException("Using a(n) " + objectName + " requires https");
public static void httpsValidation(Object objectToCheck, String objectName, String endpoint, ClientLogger logger) { if (objectToCheck != null && !parseEndpoint(endpoint, logger).getScheme().equals(Constants.HTTPS)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Using a(n) " + objectName + " requires https")); } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-queue"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; private static final Pattern IP_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); /** * Parse the endpoint for the account name, queue name, and SAS token query parameters. * * @param endpoint Endpoint to parse. * @param logger {@link ClientLogger} used to log any exception. * @return The parsed endpoint as a {@link QueueUrlParts}. */ public static QueueUrlParts parseEndpoint(String endpoint, ClientLogger logger) { Objects.requireNonNull(endpoint); try { URL url = new URL(endpoint); QueueUrlParts parts = new QueueUrlParts().setScheme(url.getProtocol()); if (IP_URL_PATTERN.matcher(url.getHost()).find()) { String path = url.getPath(); if (!CoreUtils.isNullOrEmpty(path) && path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 2); parts.setAccountName(pathPieces[0]); if (pathPieces.length == 2) { parts.setQueueName(pathPieces[1]); } parts.setEndpoint(String.format("%s: parts.getAccountName())); } else { String host = url.getHost(); String accountName = null; if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } parts.setAccountName(accountName); String[] pathSegments = url.getPath().split("/", 2); if (pathSegments.length == 2 && !CoreUtils.isNullOrEmpty(pathSegments[1])) { parts.setQueueName(pathSegments[1]); } parts.setEndpoint(String.format("%s: } String sasToken = new QueueServiceSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(url.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { parts.setQueueName(sasToken); } return parts; } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure 
Storage Queue endpoint url is malformed.", ex)); } } /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param logger {@link ClientLogger} used to log any exception. * @return A new {@link HttpPipeline} from the passed values. */ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, ClientLogger logger) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { httpsValidation(tokenCredential, "bearer token", endpoint, logger); credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { 
policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Queue. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); QueueHeadersAndQueryParameters.getQueueHeaders().forEach(defaultOptions::addAllowedHeaderName); QueueHeadersAndQueryParameters.getQueueQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(getDefaultHttpLogOptions().getApplicationId(), DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. */ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .build(); } /** * Validates that the client is properly configured to use https. 
* * @param objectToCheck The object to check for. * @param objectName The name of the object. * @param endpoint The endpoint for the client. * @param logger {@link ClientLogger} used to log any exception. */ public static class QueueUrlParts { private String scheme; private String endpoint; private String accountName; private String queueName; private String sasToken; public String getScheme() { return scheme; } public QueueUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } public String getEndpoint() { return endpoint; } public QueueUrlParts setEndpoint(String endpoint) { this.endpoint = endpoint; return this; } public String getAccountName() { return accountName; } public QueueUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } public String getQueueName() { return queueName; } QueueUrlParts setQueueName(String queueName) { this.queueName = queueName; return this; } public String getSasToken() { return sasToken; } public QueueUrlParts setSasToken(String sasToken) { this.sasToken = sasToken; return this; } } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-queue"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; private static final Pattern IP_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); /** * Parse the endpoint for the account name, queue name, and SAS token query parameters. * * @param endpoint Endpoint to parse. * @param logger {@link ClientLogger} used to log any exception. * @return The parsed endpoint as a {@link QueueUrlParts}. */ public static QueueUrlParts parseEndpoint(String endpoint, ClientLogger logger) { Objects.requireNonNull(endpoint); try { URL url = new URL(endpoint); QueueUrlParts parts = new QueueUrlParts().setScheme(url.getProtocol()); if (IP_URL_PATTERN.matcher(url.getHost()).find()) { String path = url.getPath(); if (!CoreUtils.isNullOrEmpty(path) && path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 2); parts.setAccountName(pathPieces[0]); if (pathPieces.length == 2) { parts.setQueueName(pathPieces[1]); } parts.setEndpoint(String.format("%s: parts.getAccountName())); } else { String host = url.getHost(); String accountName = null; if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } parts.setAccountName(accountName); String[] pathSegments = url.getPath().split("/", 2); if (pathSegments.length == 2 && !CoreUtils.isNullOrEmpty(pathSegments[1])) { parts.setQueueName(pathSegments[1]); } parts.setEndpoint(String.format("%s: } String sasToken = new QueueServiceSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(url.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { parts.setQueueName(sasToken); } return parts; } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure 
Storage Queue endpoint url is malformed.", ex)); } } /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param logger {@link ClientLogger} used to log any exception. * @return A new {@link HttpPipeline} from the passed values. */ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, ClientLogger logger) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { httpsValidation(tokenCredential, "bearer token", endpoint, logger); credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { 
policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Queue. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); QueueHeadersAndQueryParameters.getQueueHeaders().forEach(defaultOptions::addAllowedHeaderName); QueueHeadersAndQueryParameters.getQueueQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(getDefaultHttpLogOptions().getApplicationId(), DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. */ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .build(); } /** * Validates that the client is properly configured to use https. 
* * @param objectToCheck The object to check for. * @param objectName The name of the object. * @param endpoint The endpoint for the client. * @param logger {@link ClientLogger} used to log any exception. */ public static class QueueUrlParts { private String scheme; private String endpoint; private String accountName; private String queueName; private String sasToken; public String getScheme() { return scheme; } public QueueUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } public String getEndpoint() { return endpoint; } public QueueUrlParts setEndpoint(String endpoint) { this.endpoint = endpoint; return this; } public String getAccountName() { return accountName; } public QueueUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } public String getQueueName() { return queueName; } QueueUrlParts setQueueName(String queueName) { this.queueName = queueName; return this; } public String getSasToken() { return sasToken; } public QueueUrlParts setSasToken(String sasToken) { this.sasToken = sasToken; return this; } } }
Updated to use logger
public static void httpsValidation(Object objectToCheck, String objectName, String endpoint, ClientLogger logger) { if (objectToCheck != null && !parseEndpoint(endpoint, logger).getScheme().equals(Constants.HTTPS)) { throw new IllegalArgumentException("Using a(n) " + objectName + " requires https"); } }
throw new IllegalArgumentException("Using a(n) " + objectName + " requires https");
public static void httpsValidation(Object objectToCheck, String objectName, String endpoint, ClientLogger logger) { if (objectToCheck != null && !parseEndpoint(endpoint, logger).getScheme().equals(Constants.HTTPS)) { throw logger.logExceptionAsError(new IllegalArgumentException( "Using a(n) " + objectName + " requires https")); } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-queue"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; private static final Pattern IP_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); /** * Parse the endpoint for the account name, queue name, and SAS token query parameters. * * @param endpoint Endpoint to parse. * @param logger {@link ClientLogger} used to log any exception. * @return The parsed endpoint as a {@link QueueUrlParts}. */ public static QueueUrlParts parseEndpoint(String endpoint, ClientLogger logger) { Objects.requireNonNull(endpoint); try { URL url = new URL(endpoint); QueueUrlParts parts = new QueueUrlParts().setScheme(url.getProtocol()); if (IP_URL_PATTERN.matcher(url.getHost()).find()) { String path = url.getPath(); if (!CoreUtils.isNullOrEmpty(path) && path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 2); parts.setAccountName(pathPieces[0]); if (pathPieces.length == 2) { parts.setQueueName(pathPieces[1]); } parts.setEndpoint(String.format("%s: parts.getAccountName())); } else { String host = url.getHost(); String accountName = null; if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } parts.setAccountName(accountName); String[] pathSegments = url.getPath().split("/", 2); if (pathSegments.length == 2 && !CoreUtils.isNullOrEmpty(pathSegments[1])) { parts.setQueueName(pathSegments[1]); } parts.setEndpoint(String.format("%s: } String sasToken = new QueueServiceSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(url.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { parts.setQueueName(sasToken); } return parts; } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure 
Storage Queue endpoint url is malformed.", ex)); } } /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param logger {@link ClientLogger} used to log any exception. * @return A new {@link HttpPipeline} from the passed values. */ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, ClientLogger logger) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { httpsValidation(tokenCredential, "bearer token", endpoint, logger); credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { 
policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Queue. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); QueueHeadersAndQueryParameters.getQueueHeaders().forEach(defaultOptions::addAllowedHeaderName); QueueHeadersAndQueryParameters.getQueueQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(getDefaultHttpLogOptions().getApplicationId(), DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. */ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .build(); } /** * Validates that the client is properly configured to use https. 
* * @param objectToCheck The object to check for. * @param objectName The name of the object. * @param endpoint The endpoint for the client. * @param logger {@link ClientLogger} used to log any exception. */ public static class QueueUrlParts { private String scheme; private String endpoint; private String accountName; private String queueName; private String sasToken; public String getScheme() { return scheme; } public QueueUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } public String getEndpoint() { return endpoint; } public QueueUrlParts setEndpoint(String endpoint) { this.endpoint = endpoint; return this; } public String getAccountName() { return accountName; } public QueueUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } public String getQueueName() { return queueName; } QueueUrlParts setQueueName(String queueName) { this.queueName = queueName; return this; } public String getSasToken() { return sasToken; } public QueueUrlParts setSasToken(String sasToken) { this.sasToken = sasToken; return this; } } }
class BuilderHelper { private static final String DEFAULT_USER_AGENT_NAME = "azure-storage-queue"; private static final String DEFAULT_USER_AGENT_VERSION = "12.1.0-beta.1"; private static final Pattern IP_URL_PATTERN = Pattern .compile("(?:\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})|(?:localhost)"); /** * Parse the endpoint for the account name, queue name, and SAS token query parameters. * * @param endpoint Endpoint to parse. * @param logger {@link ClientLogger} used to log any exception. * @return The parsed endpoint as a {@link QueueUrlParts}. */ public static QueueUrlParts parseEndpoint(String endpoint, ClientLogger logger) { Objects.requireNonNull(endpoint); try { URL url = new URL(endpoint); QueueUrlParts parts = new QueueUrlParts().setScheme(url.getProtocol()); if (IP_URL_PATTERN.matcher(url.getHost()).find()) { String path = url.getPath(); if (!CoreUtils.isNullOrEmpty(path) && path.charAt(0) == '/') { path = path.substring(1); } String[] pathPieces = path.split("/", 2); parts.setAccountName(pathPieces[0]); if (pathPieces.length == 2) { parts.setQueueName(pathPieces[1]); } parts.setEndpoint(String.format("%s: parts.getAccountName())); } else { String host = url.getHost(); String accountName = null; if (!CoreUtils.isNullOrEmpty(host)) { int accountNameIndex = host.indexOf('.'); if (accountNameIndex == -1) { accountName = host; } else { accountName = host.substring(0, accountNameIndex); } } parts.setAccountName(accountName); String[] pathSegments = url.getPath().split("/", 2); if (pathSegments.length == 2 && !CoreUtils.isNullOrEmpty(pathSegments[1])) { parts.setQueueName(pathSegments[1]); } parts.setEndpoint(String.format("%s: } String sasToken = new QueueServiceSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(url.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { parts.setQueueName(sasToken); } return parts; } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure 
Storage Queue endpoint url is malformed.", ex)); } } /** * Constructs a {@link HttpPipeline} from values passed from a builder. * * @param storageSharedKeyCredential {@link StorageSharedKeyCredential} if present. * @param tokenCredential {@link TokenCredential} if present. * @param sasTokenCredential {@link SasTokenCredential} if present. * @param endpoint The endpoint for the client. * @param retryOptions Retry options to set in the retry policy. * @param logOptions Logging options to set in the logging policy. * @param httpClient HttpClient to use in the builder. * @param additionalPolicies Additional {@link HttpPipelinePolicy policies} to set in the pipeline. * @param configuration Configuration store contain environment settings. * @param logger {@link ClientLogger} used to log any exception. * @return A new {@link HttpPipeline} from the passed values. */ public static HttpPipeline buildPipeline(StorageSharedKeyCredential storageSharedKeyCredential, TokenCredential tokenCredential, SasTokenCredential sasTokenCredential, String endpoint, RequestRetryOptions retryOptions, HttpLogOptions logOptions, HttpClient httpClient, List<HttpPipelinePolicy> additionalPolicies, Configuration configuration, ClientLogger logger) { List<HttpPipelinePolicy> policies = new ArrayList<>(); policies.add(getUserAgentPolicy(configuration)); policies.add(new RequestIdPolicy()); policies.add(new AddDatePolicy()); HttpPipelinePolicy credentialPolicy; if (storageSharedKeyCredential != null) { credentialPolicy = new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (tokenCredential != null) { httpsValidation(tokenCredential, "bearer token", endpoint, logger); credentialPolicy = new BearerTokenAuthenticationPolicy(tokenCredential, String.format("%s/.default", endpoint)); } else if (sasTokenCredential != null) { credentialPolicy = new SasTokenCredentialPolicy(sasTokenCredential); } else { credentialPolicy = null; } if (credentialPolicy != null) { 
policies.add(credentialPolicy); } HttpPolicyProviders.addBeforeRetryPolicies(policies); policies.add(new RequestRetryPolicy(retryOptions)); policies.addAll(additionalPolicies); HttpPolicyProviders.addAfterRetryPolicies(policies); policies.add(getResponseValidationPolicy()); policies.add(new HttpLoggingPolicy(logOptions)); policies.add(new ScrubEtagPolicy()); return new HttpPipelineBuilder() .policies(policies.toArray(new HttpPipelinePolicy[0])) .httpClient(httpClient) .build(); } /** * Gets the default http log option for Storage Queue. * * @return the default http log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { HttpLogOptions defaultOptions = new HttpLogOptions(); QueueHeadersAndQueryParameters.getQueueHeaders().forEach(defaultOptions::addAllowedHeaderName); QueueHeadersAndQueryParameters.getQueueQueryParameters().forEach(defaultOptions::addAllowedQueryParamName); return defaultOptions; } /* * Creates a {@link UserAgentPolicy} using the default blob module name and version. * * @param configuration Configuration store used to determine whether telemetry information should be included. * @return The default {@link UserAgentPolicy} for the module. */ private static UserAgentPolicy getUserAgentPolicy(Configuration configuration) { configuration = (configuration == null) ? Configuration.NONE : configuration; return new UserAgentPolicy(getDefaultHttpLogOptions().getApplicationId(), DEFAULT_USER_AGENT_NAME, DEFAULT_USER_AGENT_VERSION, configuration); } /* * Creates a {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} used to validate response data from * the service. * * @return The {@link ResponseValidationPolicyBuilder.ResponseValidationPolicy} for the module. */ private static HttpPipelinePolicy getResponseValidationPolicy() { return new ResponseValidationPolicyBuilder() .addOptionalEcho(Constants.HeaderConstants.CLIENT_REQUEST_ID) .build(); } /** * Validates that the client is properly configured to use https. 
* * @param objectToCheck The object to check for. * @param objectName The name of the object. * @param endpoint The endpoint for the client. * @param logger {@link ClientLogger} used to log any exception. */ public static class QueueUrlParts { private String scheme; private String endpoint; private String accountName; private String queueName; private String sasToken; public String getScheme() { return scheme; } public QueueUrlParts setScheme(String scheme) { this.scheme = scheme; return this; } public String getEndpoint() { return endpoint; } public QueueUrlParts setEndpoint(String endpoint) { this.endpoint = endpoint; return this; } public String getAccountName() { return accountName; } public QueueUrlParts setAccountName(String accountName) { this.accountName = accountName; return this; } public String getQueueName() { return queueName; } QueueUrlParts setQueueName(String queueName) { this.queueName = queueName; return this; } public String getSasToken() { return sasToken; } public QueueUrlParts setSasToken(String sasToken) { this.sasToken = sasToken; return this; } } }
Should this be in messages.properties?
private static int sizeof(Object obj) { if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Integer) { return Integer.BYTES; } if (obj instanceof Long) { return Long.BYTES; } if (obj instanceof Short) { return Short.BYTES; } if (obj instanceof Character) { return Character.BYTES; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } throw new IllegalArgumentException(String.format("Encoding Type: %s is not supported", obj.getClass())); }
throw new IllegalArgumentException(String.format("Encoding Type: %s is not supported",
private static int sizeof(Object obj) { if (obj instanceof String) { return obj.toString().length() << 1; } if (obj instanceof Symbol) { return ((Symbol) obj).length() << 1; } if (obj instanceof Integer) { return Integer.BYTES; } if (obj instanceof Long) { return Long.BYTES; } if (obj instanceof Short) { return Short.BYTES; } if (obj instanceof Character) { return Character.BYTES; } if (obj instanceof Float) { return Float.BYTES; } if (obj instanceof Double) { return Double.BYTES; } throw new IllegalArgumentException(String.format(Messages.ENCODING_TYPE_NOT_SUPPORTED, obj.getClass())); }
class || clazz == EventHubProperties.class) { return deserializeManagementResponse(message, clazz); }
class || clazz == EventHubProperties.class) { return deserializeManagementResponse(message, clazz); }
Instead of having `flux` and `iterable` fields in this class, if an instance of IterableStream is created using an iterable, can this just be converted to `this.flux = Flux.fromIterable(Objects.requireNonNull(iterable, "'iterable' cannot be null."));`. Simplifies code in other methods too where you don't have to check if flux is null or iterable is null.
public IterableStream(Iterable<T> iterable) { this.iterable = Objects.requireNonNull(iterable, "'iterable' cannot be null."); this.flux = null; }
this.iterable = Objects.requireNonNull(iterable, "'iterable' cannot be null.");
public IterableStream(Iterable<T> iterable) { this.iterable = Objects.requireNonNull(iterable, "'iterable' cannot be null."); this.flux = null; }
class IterableStream<T> implements Iterable<T> { private final ClientLogger logger = new ClientLogger(IterableStream.class); private final Flux<T> flux; private final Iterable<T> iterable; /** * Creates an instance with the given {@link Flux}. * * @param flux Flux of items to iterate over. * @throws NullPointerException if {@code flux} is {@code null}. */ public IterableStream(Flux<T> flux) { this.flux = Objects.requireNonNull(flux, "'flux' cannot be null."); this.iterable = null; } /** * Creates an instance with the given {@link Iterable}. * * @param iterable Collection of items to iterate over. * @throws NullPointerException if {@code iterable} is {@code null}. */ /** * Utility function to provide {@link Stream} of value {@code T}. * It will provide the same stream of {@code T} values if called multiple times. * * @return {@link Stream} of value {@code T}. */ public Stream<T> stream() { if (flux != null) { return flux.toStream(); } else if (iterable != null) { return StreamSupport.stream(iterable.spliterator(), false); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty stream."); return Stream.empty(); } } /** * Utility function to provide {@link Iterator} of value {@code T}. * It will provide same collection of {@code T} values if called multiple times. * * @return {@link Iterator} of value {@code T}. */ @Override public Iterator<T> iterator() { if (flux != null) { return flux.toIterable().iterator(); } else if (iterable != null) { return iterable.iterator(); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty iterator."); return Collections.emptyIterator(); } } }
class IterableStream<T> implements Iterable<T> { private final ClientLogger logger = new ClientLogger(IterableStream.class); private final Flux<T> flux; private final Iterable<T> iterable; /** * Creates an instance with the given {@link Flux}. * * @param flux Flux of items to iterate over. * @throws NullPointerException if {@code flux} is {@code null}. */ public IterableStream(Flux<T> flux) { this.flux = Objects.requireNonNull(flux, "'flux' cannot be null."); this.iterable = null; } /** * Creates an instance with the given {@link Iterable}. * * @param iterable Collection of items to iterate over. * @throws NullPointerException if {@code iterable} is {@code null}. */ /** * Utility function to provide {@link Stream} of value {@code T}. * It will provide the same stream of {@code T} values if called multiple times. * * @return {@link Stream} of value {@code T}. */ public Stream<T> stream() { if (flux != null) { return flux.toStream(); } else if (iterable != null) { return StreamSupport.stream(iterable.spliterator(), false); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty stream."); return Stream.empty(); } } /** * Utility function to provide {@link Iterator} of value {@code T}. * It will provide same collection of {@code T} values if called multiple times. * * @return {@link Iterator} of value {@code T}. */ @Override public Iterator<T> iterator() { if (flux != null) { return flux.toIterable().iterator(); } else if (iterable != null) { return iterable.iterator(); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty iterator."); return Collections.emptyIterator(); } } }
Unfortunately this will result in that illegal state exception again because we are moving from sync to async world then back again. It was what the code was doing before this overload.
public IterableStream(Iterable<T> iterable) { this.iterable = Objects.requireNonNull(iterable, "'iterable' cannot be null."); this.flux = null; }
this.iterable = Objects.requireNonNull(iterable, "'iterable' cannot be null.");
public IterableStream(Iterable<T> iterable) { this.iterable = Objects.requireNonNull(iterable, "'iterable' cannot be null."); this.flux = null; }
class IterableStream<T> implements Iterable<T> { private final ClientLogger logger = new ClientLogger(IterableStream.class); private final Flux<T> flux; private final Iterable<T> iterable; /** * Creates an instance with the given {@link Flux}. * * @param flux Flux of items to iterate over. * @throws NullPointerException if {@code flux} is {@code null}. */ public IterableStream(Flux<T> flux) { this.flux = Objects.requireNonNull(flux, "'flux' cannot be null."); this.iterable = null; } /** * Creates an instance with the given {@link Iterable}. * * @param iterable Collection of items to iterate over. * @throws NullPointerException if {@code iterable} is {@code null}. */ /** * Utility function to provide {@link Stream} of value {@code T}. * It will provide the same stream of {@code T} values if called multiple times. * * @return {@link Stream} of value {@code T}. */ public Stream<T> stream() { if (flux != null) { return flux.toStream(); } else if (iterable != null) { return StreamSupport.stream(iterable.spliterator(), false); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty stream."); return Stream.empty(); } } /** * Utility function to provide {@link Iterator} of value {@code T}. * It will provide same collection of {@code T} values if called multiple times. * * @return {@link Iterator} of value {@code T}. */ @Override public Iterator<T> iterator() { if (flux != null) { return flux.toIterable().iterator(); } else if (iterable != null) { return iterable.iterator(); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty iterator."); return Collections.emptyIterator(); } } }
class IterableStream<T> implements Iterable<T> { private final ClientLogger logger = new ClientLogger(IterableStream.class); private final Flux<T> flux; private final Iterable<T> iterable; /** * Creates an instance with the given {@link Flux}. * * @param flux Flux of items to iterate over. * @throws NullPointerException if {@code flux} is {@code null}. */ public IterableStream(Flux<T> flux) { this.flux = Objects.requireNonNull(flux, "'flux' cannot be null."); this.iterable = null; } /** * Creates an instance with the given {@link Iterable}. * * @param iterable Collection of items to iterate over. * @throws NullPointerException if {@code iterable} is {@code null}. */ /** * Utility function to provide {@link Stream} of value {@code T}. * It will provide the same stream of {@code T} values if called multiple times. * * @return {@link Stream} of value {@code T}. */ public Stream<T> stream() { if (flux != null) { return flux.toStream(); } else if (iterable != null) { return StreamSupport.stream(iterable.spliterator(), false); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty stream."); return Stream.empty(); } } /** * Utility function to provide {@link Iterator} of value {@code T}. * It will provide same collection of {@code T} values if called multiple times. * * @return {@link Iterator} of value {@code T}. */ @Override public Iterator<T> iterator() { if (flux != null) { return flux.toIterable().iterator(); } else if (iterable != null) { return iterable.iterator(); } else { logger.warning("IterableStream was not initialized with Iterable or Flux, returning empty iterator."); return Collections.emptyIterator(); } } }
nit: define a constant instead
public Stream<P> streamByPage() { return pagedFluxBase.byPage().toStream(1); }
return pagedFluxBase.byPage().toStream(1);
public Stream<P> streamByPage() { return pagedFluxBase.byPage().toStream(DEFAULT_BATCH_SIZE); }
class PagedIterableBase<T, P extends PagedResponse<T>> extends IterableStream<T> { private final PagedFluxBase<T, P> pagedFluxBase; /** * Creates instance given {@link PagedFluxBase}. * @param pagedFluxBase to use as iterable */ public PagedIterableBase(PagedFluxBase<T, P> pagedFluxBase) { super(pagedFluxBase); this.pagedFluxBase = pagedFluxBase; } /** * Retrieve the {@link Stream}, one page at a time. * It will provide same {@link Stream} of T values from starting if called multiple times. * * @return {@link Stream} of a Response that extends {@link PagedResponse} */ /** * Retrieve the {@link Stream}, one page at a time, starting from the next page associated with the given * continuation token. To start from first page, use {@link * * @param continuationToken The continuation token used to fetch the next page * @return {@link Stream} of a Response that extends {@link PagedResponse}, starting from the page associated * with the continuation token */ public Stream<P> streamByPage(String continuationToken) { return pagedFluxBase.byPage(continuationToken).toStream(1); } /** * Provides {@link Iterable} API for{ @link PagedResponse} * It will provide same collection of {@code T} values from starting if called multiple times. * * @return {@link Iterable} interface */ public Iterable<P> iterableByPage() { return pagedFluxBase.byPage().toIterable(1); } /** * Provides {@link Iterable} API for {@link PagedResponse}, starting from the next page associated with the given * continuation token. To start from first page, use {@link * It will provide same collection of T values from starting if called multiple times. * * @param continuationToken The continuation token used to fetch the next page * @return {@link Iterable} interface */ public Iterable<P> iterableByPage(String continuationToken) { return pagedFluxBase.byPage(continuationToken).toIterable(1); } }
class PagedIterableBase<T, P extends PagedResponse<T>> extends IterableStream<T> { /* * This is the default batch size that will be requested when using stream or iterable by page, this will indicate * to Reactor how many elements should be prefetched before another batch is requested. */ private static final int DEFAULT_BATCH_SIZE = 1; private final PagedFluxBase<T, P> pagedFluxBase; /** * Creates instance given {@link PagedFluxBase}. * @param pagedFluxBase to use as iterable */ public PagedIterableBase(PagedFluxBase<T, P> pagedFluxBase) { super(pagedFluxBase); this.pagedFluxBase = pagedFluxBase; } /** * Retrieve the {@link Stream}, one page at a time. * It will provide same {@link Stream} of T values from starting if called multiple times. * * @return {@link Stream} of a Response that extends {@link PagedResponse} */ /** * Retrieve the {@link Stream}, one page at a time, starting from the next page associated with the given * continuation token. To start from first page, use {@link * * @param continuationToken The continuation token used to fetch the next page * @return {@link Stream} of a Response that extends {@link PagedResponse}, starting from the page associated * with the continuation token */ public Stream<P> streamByPage(String continuationToken) { return pagedFluxBase.byPage(continuationToken).toStream(DEFAULT_BATCH_SIZE); } /** * Provides {@link Iterable} API for{ @link PagedResponse} * It will provide same collection of {@code T} values from starting if called multiple times. * * @return {@link Iterable} interface */ public Iterable<P> iterableByPage() { return pagedFluxBase.byPage().toIterable(DEFAULT_BATCH_SIZE); } /** * Provides {@link Iterable} API for {@link PagedResponse}, starting from the next page associated with the given * continuation token. To start from first page, use {@link * It will provide same collection of T values from starting if called multiple times. 
* * @param continuationToken The continuation token used to fetch the next page * @return {@link Iterable} interface */ public Iterable<P> iterableByPage(String continuationToken) { return pagedFluxBase.byPage(continuationToken).toIterable(DEFAULT_BATCH_SIZE); } }
Should this just make a single call to CoreUtils?
public void testProperties() { assertNotNull(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getVersion()); assertNotNull(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getName()); assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getVersion() .matches("\\d.\\d.\\d([-a-zA-Z0-9.])*")); }
assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getVersion()
public void testProperties() { UserAgentProperties properties = CoreUtils.getUserAgentProperties("azure-core.properties"); assertFalse(properties.getName().matches("UnknownName")); assertTrue(CoreUtils.getUserAgentProperties("azure-core.properties").getVersion() .matches("\\d.\\d.\\d([-a-zA-Z0-9.])*")); }
class CoreUtilsTests { @Test public void findFirstOfTypeEmptyArgs() { assertNull(CoreUtils.findFirstOfType(null, Integer.class)); } @Test public void findFirstOfTypeWithOneOfType() { int expected = 1; Object[] args = { "string", expected }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithMultipleOfType() { int expected = 1; Object[] args = { "string", expected, 10 }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithNoneOfType() { Object[] args = { "string", "anotherString" }; assertNull(CoreUtils.findFirstOfType(args, Integer.class)); } @Test @Test public void testMissingProperties() { assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("foo.properties") .getVersion().matches("UnknownVersion")); assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("foo.properties") .getName().matches("UnknownName")); } }
class CoreUtilsTests { @Test public void findFirstOfTypeEmptyArgs() { assertNull(CoreUtils.findFirstOfType(null, Integer.class)); } @Test public void findFirstOfTypeWithOneOfType() { int expected = 1; Object[] args = { "string", expected }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithMultipleOfType() { int expected = 1; Object[] args = { "string", expected, 10 }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithNoneOfType() { Object[] args = { "string", "anotherString" }; assertNull(CoreUtils.findFirstOfType(args, Integer.class)); } @Test @Test public void testMissingProperties() { assertTrue(CoreUtils.getUserAgentProperties("foo.properties") .getVersion().matches("UnknownVersion")); assertTrue(CoreUtils.getUserAgentProperties("foo.properties") .getName().matches("UnknownName")); } }
Done. Will push in with another commit
public void testProperties() { assertNotNull(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getVersion()); assertNotNull(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getName()); assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getVersion() .matches("\\d.\\d.\\d([-a-zA-Z0-9.])*")); }
assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("azure-core.properties").getVersion()
public void testProperties() { UserAgentProperties properties = CoreUtils.getUserAgentProperties("azure-core.properties"); assertFalse(properties.getName().matches("UnknownName")); assertTrue(CoreUtils.getUserAgentProperties("azure-core.properties").getVersion() .matches("\\d.\\d.\\d([-a-zA-Z0-9.])*")); }
class CoreUtilsTests { @Test public void findFirstOfTypeEmptyArgs() { assertNull(CoreUtils.findFirstOfType(null, Integer.class)); } @Test public void findFirstOfTypeWithOneOfType() { int expected = 1; Object[] args = { "string", expected }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithMultipleOfType() { int expected = 1; Object[] args = { "string", expected, 10 }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithNoneOfType() { Object[] args = { "string", "anotherString" }; assertNull(CoreUtils.findFirstOfType(args, Integer.class)); } @Test @Test public void testMissingProperties() { assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("foo.properties") .getVersion().matches("UnknownVersion")); assertTrue(CoreUtils.getUserAgentPropertiesFromProperties("foo.properties") .getName().matches("UnknownName")); } }
class CoreUtilsTests { @Test public void findFirstOfTypeEmptyArgs() { assertNull(CoreUtils.findFirstOfType(null, Integer.class)); } @Test public void findFirstOfTypeWithOneOfType() { int expected = 1; Object[] args = { "string", expected }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithMultipleOfType() { int expected = 1; Object[] args = { "string", expected, 10 }; int actual = CoreUtils.findFirstOfType(args, Integer.class); Assertions.assertEquals(expected, actual); } @Test public void findFirstOfTypeWithNoneOfType() { Object[] args = { "string", "anotherString" }; assertNull(CoreUtils.findFirstOfType(args, Integer.class)); } @Test @Test public void testMissingProperties() { assertTrue(CoreUtils.getUserAgentProperties("foo.properties") .getVersion().matches("UnknownVersion")); assertTrue(CoreUtils.getUserAgentProperties("foo.properties") .getName().matches("UnknownName")); } }
`UnknownName` and `UnknownVersion` can also be made constants.
public static UserAgentProperties getUserAgentProperties(String propertiesFileName) { Map<String, String> propertyMap = getProperties(propertiesFileName); String name = propertyMap.getOrDefault(NAME, "UnknownName"); String version = propertyMap.getOrDefault(VERSION, "UnknownVersion"); return new UserAgentProperties(name, version); }
String name = propertyMap.getOrDefault(NAME, "UnknownName");
public static UserAgentProperties getUserAgentProperties(String propertiesFileName) { Map<String, String> propertyMap = getProperties(propertiesFileName); String name = propertyMap.getOrDefault(NAME, UNKNOWN_NAME); String version = propertyMap.getOrDefault(VERSION, UNKNOWN_VERSION); return new UserAgentProperties(name, version); }
class from an array of Objects. * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
class from an array of Objects. * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
Done.
public static UserAgentProperties getUserAgentProperties(String propertiesFileName) { Map<String, String> propertyMap = getProperties(propertiesFileName); String name = propertyMap.getOrDefault(NAME, "UnknownName"); String version = propertyMap.getOrDefault(VERSION, "UnknownVersion"); return new UserAgentProperties(name, version); }
String name = propertyMap.getOrDefault(NAME, "UnknownName");
public static UserAgentProperties getUserAgentProperties(String propertiesFileName) { Map<String, String> propertyMap = getProperties(propertiesFileName); String name = propertyMap.getOrDefault(NAME, UNKNOWN_NAME); String version = propertyMap.getOrDefault(VERSION, UNKNOWN_VERSION); return new UserAgentProperties(name, version); }
class from an array of Objects. * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
class from an array of Objects. * @param args Array of objects to search through to find the first instance of the given `clazz` type. * @param clazz The type trying to be found. * @param <T> Generic type * @return The first object of the desired type, otherwise null. */ public static <T> T findFirstOfType(Object[] args, Class<T> clazz) { if (isNullOrEmpty(args)) { return null; } for (Object arg : args) { if (clazz.isInstance(arg)) { return clazz.cast(arg); } } return null; }
addressed
void run() throws Exception { successMeter = metricsRegistry.meter(" failureMeter = metricsRegistry.meter(" switch (configuration.getOperationType()) { case ReadLatency: case Mixed: latency = metricsRegistry.timer("Latency"); break; default: break; } reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; for ( i = 0; shouldContinue(startTime, i); i++) { ResultHandler<T, Throwable> resultHandler = new ResultHandler<T, Throwable>() { @Override public T apply(T t, Throwable throwable) { successMeter.mark(); concurrencyControlSemaphore.release(); if (t != null) { assert(throwable == null); SyncBenchmark.this.onSuccess(); synchronized (count) { count.incrementAndGet(); count.notify(); } } else { assert(throwable != null); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); SyncBenchmark.this.onError(throwable); synchronized (count) { count.incrementAndGet(); count.notify(); } } return t; } }; concurrencyControlSemaphore.acquire(); final long cnt = i; switch (configuration.getOperationType()) { case ReadLatency: LatencyListener latencyListener = new LatencyListener(resultHandler, latency); latencyListener.context = latency.time(); resultHandler = latencyListener; break; default: break; } final ResultHandler<T, Throwable> finalResultHandler = resultHandler; CompletableFuture futureResult = CompletableFuture.supplyAsync(() -> { try { finalResultHandler.init(); return performWorkload(cnt); } catch (Exception e) { throw propagate(e); } }, executorService); futureResult.handle(resultHandler); } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); 
reporter.report(); reporter.close(); }
LatencyListener latencyListener = new LatencyListener(resultHandler, latency);
void run() throws Exception { successMeter = metricsRegistry.meter(" failureMeter = metricsRegistry.meter(" switch (configuration.getOperationType()) { case ReadLatency: case Mixed: latency = metricsRegistry.timer("Latency"); break; default: break; } reporter.start(configuration.getPrintingInterval(), TimeUnit.SECONDS); long startTime = System.currentTimeMillis(); AtomicLong count = new AtomicLong(0); long i; for ( i = 0; shouldContinue(startTime, i); i++) { ResultHandler<T, Throwable> resultHandler = new ResultHandler<T, Throwable>() { @Override public T apply(T t, Throwable throwable) { successMeter.mark(); concurrencyControlSemaphore.release(); if (t != null) { assert(throwable == null); SyncBenchmark.this.onSuccess(); synchronized (count) { count.incrementAndGet(); count.notify(); } } else { assert(throwable != null); failureMeter.mark(); logger.error("Encountered failure {} on thread {}" , throwable.getMessage(), Thread.currentThread().getName(), throwable); concurrencyControlSemaphore.release(); SyncBenchmark.this.onError(throwable); synchronized (count) { count.incrementAndGet(); count.notify(); } } return t; } }; concurrencyControlSemaphore.acquire(); final long cnt = i; switch (configuration.getOperationType()) { case ReadLatency: LatencyListener<T> latencyListener = new LatencyListener(resultHandler, latency); latencyListener.context = latency.time(); resultHandler = latencyListener; break; default: break; } final ResultHandler<T, Throwable> finalResultHandler = resultHandler; CompletableFuture<T> futureResult = CompletableFuture.supplyAsync(() -> { try { finalResultHandler.init(); return performWorkload(cnt); } catch (Exception e) { throw propagate(e); } }, executorService); futureResult.handle(resultHandler); } synchronized (count) { while (count.get() < i) { count.wait(); } } long endTime = System.currentTimeMillis(); logger.info("[{}] operations performed in [{}] seconds.", configuration.getNumberOfOperations(), (int) ((endTime - startTime) / 1000)); 
reporter.report(); reporter.close(); }
/**
 * A {@code ResultHandler} decorator that records per-operation latency into a {@link Timer}
 * before delegating the result to the wrapped handler.
 *
 * <p>Fix: the {@code Timer} passed to the constructor was previously discarded, so
 * {@code init()} resolved {@code latency} against the enclosing scope instead of the injected
 * timer. The timer is now stored in a field and used by {@code init()}.</p>
 */
class LatencyListener<T> extends ResultHandler<T, Throwable> {
    // Handler that receives the result once the latency sample has been recorded.
    private final ResultHandler<T, Throwable> baseFunction;
    // Timer used to open a fresh timing context for each operation.
    private final Timer latencyTimer;
    Timer.Context context;

    LatencyListener(ResultHandler<T, Throwable> baseFunction, Timer latencyTimer) {
        this.baseFunction = baseFunction;
        this.latencyTimer = latencyTimer;
    }

    @Override
    protected void init() {
        super.init();
        // Start the clock when the workload begins; apply() stops it.
        context = latencyTimer.time();
    }

    @Override
    public T apply(T o, Throwable throwable) {
        context.stop();
        return baseFunction.apply(o, throwable);
    }
}
/**
 * Wraps another {@code ResultHandler} and measures how long each operation takes, closing the
 * open {@link Timer.Context} when the result arrives and then forwarding it on.
 */
class LatencyListener<T> extends ResultHandler<T, Throwable> {
    // Downstream handler invoked after the latency sample has been recorded.
    private final ResultHandler<T, Throwable> baseFunction;
    // Source of timing contexts; a new one is opened per operation in init().
    private final Timer latencyTimer;
    Timer.Context context;

    LatencyListener(ResultHandler<T, Throwable> delegate, Timer timer) {
        this.baseFunction = delegate;
        this.latencyTimer = timer;
    }

    protected void init() {
        super.init();
        context = latencyTimer.time();
    }

    @Override
    public T apply(T result, Throwable failure) {
        // Close the timing window first, then hand the outcome to the delegate.
        context.stop();
        return baseFunction.apply(result, failure);
    }
}
This is a good question that has gone unanswered for now — @rickle-msft, thoughts? These could just be thrown back into the query string when the SAS token is generated; that way we are able to clean the SAS token using the SAS query parameter classes without losing custom query parameters unrelated to SAS tokens.
public ShareClientBuilder endpoint(String endpoint) { try { URL fullUrl = new URL(endpoint); this.endpoint = fullUrl.getProtocol() + ": this.accountName = BuilderHelper.getAccountName(fullUrl); String[] pathSegments = fullUrl.getPath().split("/"); int length = pathSegments.length; if (length > 3) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot accept a URL to a file or directory to construct a file share client")); } this.shareName = length >= 2 ? pathSegments[1] : this.shareName; String sasToken = new CommonSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(fullUrl.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage File Service endpoint url is malformed.")); } return this; }
public ShareClientBuilder endpoint(String endpoint) { try { URL fullUrl = new URL(endpoint); this.endpoint = fullUrl.getProtocol() + ": this.accountName = BuilderHelper.getAccountName(fullUrl); String[] pathSegments = fullUrl.getPath().split("/"); int length = pathSegments.length; if (length > 3) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot accept a URL to a file or directory to construct a file share client")); } this.shareName = length >= 2 ? pathSegments[1] : this.shareName; String sasToken = new CommonSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(fullUrl.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage File Service endpoint url is malformed.")); } return this; }
/**
 * Fluent builder that configures and constructs {@link ShareClient ShareClients} and
 * {@link ShareAsyncClient ShareAsyncClients}.
 *
 * <p>NOTE(review): this class was defined twice back-to-back in the source; the duplicate has
 * been collapsed into a single copy. The {@code endpoint(String)} method body — missing from the
 * original extraction, its Javadoc left dangling — has been restored between that Javadoc and
 * {@code shareName(String)}. Garbled {@code {@link}} tags (truncated at '#') were reconstructed;
 * confirm against VCS history.</p>
 */
class ShareClientBuilder {
    private final ClientLogger logger = new ClientLogger(ShareClientBuilder.class);

    private String endpoint;
    private String accountName;
    private String shareName;
    private String snapshot;

    private StorageSharedKeyCredential storageSharedKeyCredential;
    private SasTokenCredential sasTokenCredential;

    private HttpClient httpClient;
    private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>();
    private HttpLogOptions logOptions;
    private RequestRetryOptions retryOptions = new RequestRetryOptions();
    private HttpPipeline httpPipeline;

    private Configuration configuration;
    private ShareServiceVersion version;

    /**
     * Creates a builder instance that is able to configure and construct {@link ShareClient
     * ShareClients} and {@link ShareAsyncClient ShareAsyncClients}.
     */
    public ShareClientBuilder() {
        logOptions = getDefaultHttpLogOptions();
    }

    /**
     * Creates a {@link ShareAsyncClient} based on options set in the builder. Every time
     * {@code buildAsyncClient()} is called a new instance of {@link ShareAsyncClient} is created.
     *
     * <p>If {@link #pipeline(HttpPipeline) pipeline} is set, it is used as the client's pipeline
     * and all other pipeline-related builder settings are ignored.</p>
     *
     * @return A ShareAsyncClient with the options set from the builder.
     * @throws NullPointerException If {@code shareName} is {@code null}.
     * @throws IllegalArgumentException If neither a {@link StorageSharedKeyCredential} nor a SAS
     * token has been provided.
     */
    public ShareAsyncClient buildAsyncClient() {
        Objects.requireNonNull(shareName, "'shareName' cannot be null.");
        ShareServiceVersion serviceVersion = version != null ? version : ShareServiceVersion.getLatest();

        // An explicitly supplied pipeline wins; otherwise build one from the configured credential.
        HttpPipeline pipeline = (httpPipeline != null) ? httpPipeline : BuilderHelper.buildPipeline(() -> {
            if (storageSharedKeyCredential != null) {
                return new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential);
            } else if (sasTokenCredential != null) {
                return new SasTokenCredentialPolicy(sasTokenCredential);
            } else {
                throw logger.logExceptionAsError(
                    new IllegalArgumentException("Credentials are required for authorization"));
            }
        }, retryOptions, logOptions, httpClient, additionalPolicies, configuration);

        AzureFileStorageImpl azureFileStorage = new AzureFileStorageBuilder()
            .url(endpoint)
            .pipeline(pipeline)
            .version(serviceVersion.getVersion())
            .build();

        return new ShareAsyncClient(azureFileStorage, shareName, snapshot, accountName, serviceVersion);
    }

    /**
     * Creates a {@link ShareClient} based on options set in the builder. Every time
     * {@code buildClient()} is called a new instance of {@link ShareClient} is created.
     *
     * <p>If {@link #pipeline(HttpPipeline) pipeline} is set, it is used as the client's pipeline
     * and all other pipeline-related builder settings are ignored.</p>
     *
     * @return A ShareClient with the options set from the builder.
     * @throws NullPointerException If {@code endpoint} or {@code shareName} is {@code null}.
     * @throws IllegalStateException If neither a {@link StorageSharedKeyCredential} nor a SAS
     * token has been provided.
     */
    public ShareClient buildClient() {
        return new ShareClient(buildAsyncClient());
    }

    /**
     * Sets the endpoint for the Azure Storage File instance that the client will interact with.
     *
     * <p>The first path segment, if the endpoint contains path segments, will be assumed to be
     * the name of the share that the client will interact with.</p>
     *
     * <p>Query parameters of the endpoint will be parsed in an attempt to generate a SAS token
     * to authenticate requests sent to the service.</p>
     *
     * @param endpoint The URL of the Azure Storage File instance to send service requests to and
     * receive responses from.
     * @return the updated ShareClientBuilder object
     * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL
     */
    public ShareClientBuilder endpoint(String endpoint) {
        try {
            URL fullUrl = new URL(endpoint);
            // NOTE(review): reconstructed "<scheme>://<host>" — the literal was truncated at
            // "//" in the source; confirm against VCS history.
            this.endpoint = fullUrl.getProtocol() + "://" + fullUrl.getHost();
            this.accountName = BuilderHelper.getAccountName(fullUrl);

            // Anything deeper than "/shareName" points at a file or directory.
            String[] pathSegments = fullUrl.getPath().split("/");
            int length = pathSegments.length;
            if (length > 3) {
                throw logger.logExceptionAsError(new IllegalArgumentException(
                    "Cannot accept a URL to a file or directory to construct a file share client"));
            }
            this.shareName = length >= 2 ? pathSegments[1] : this.shareName;

            // Only overwrite the configured credential when a SAS token was actually present.
            String sasToken = new CommonSasQueryParameters(
                StorageImplUtils.parseQueryStringSplitValues(fullUrl.getQuery()), false).encode();
            if (!CoreUtils.isNullOrEmpty(sasToken)) {
                this.sasToken(sasToken);
            }
        } catch (MalformedURLException ex) {
            // Preserve the parse failure as the cause instead of dropping it.
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "The Azure Storage File Service endpoint url is malformed.", ex));
        }
        return this;
    }

    /**
     * Sets the share that the constructed clients will interact with
     *
     * @param shareName Name of the share
     * @return the updated ShareClientBuilder object
     * @throws NullPointerException If {@code shareName} is {@code null}.
     */
    public ShareClientBuilder shareName(String shareName) {
        this.shareName = Objects.requireNonNull(shareName, "'shareName' cannot be null.");
        return this;
    }

    /**
     * Sets the snapshot that the constructed clients will interact with. This snapshot must be
     * linked to the share that has been specified in the builder.
     *
     * @param snapshot Identifier of the snapshot
     * @return the updated ShareClientBuilder object
     */
    public ShareClientBuilder snapshot(String snapshot) {
        this.snapshot = snapshot;
        return this;
    }

    /**
     * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service.
     *
     * @param credential The credential to use for authenticating request.
     * @return the updated ShareClientBuilder
     * @throws NullPointerException If {@code credential} is {@code null}.
     */
    public ShareClientBuilder credential(StorageSharedKeyCredential credential) {
        this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null.");
        // Shared-key and SAS credentials are mutually exclusive; clear the other.
        this.sasTokenCredential = null;
        return this;
    }

    /**
     * Sets the SAS token used to authorize requests sent to the service.
     *
     * @param sasToken The SAS token to use for authenticating requests.
     * @return the updated ShareClientBuilder
     * @throws NullPointerException If {@code sasToken} is {@code null}.
     */
    public ShareClientBuilder sasToken(String sasToken) {
        this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken,
            "'sasToken' cannot be null."));
        // Shared-key and SAS credentials are mutually exclusive; clear the other.
        this.storageSharedKeyCredential = null;
        return this;
    }

    /**
     * Sets the connection string to connect to the service.
     *
     * @param connectionString Connection string of the storage account.
     * @return the updated ShareClientBuilder
     * @throws IllegalArgumentException If {@code connectionString} in invalid.
     * @throws NullPointerException If {@code connectionString} is {@code null}.
     */
    public ShareClientBuilder connectionString(String connectionString) {
        StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger);
        StorageEndpoint endpoint = storageConnectionString.getFileEndpoint();
        if (endpoint == null || endpoint.getPrimaryUri() == null) {
            throw logger.logExceptionAsError(new IllegalArgumentException(
                "connectionString missing required settings to derive file service endpoint."));
        }
        this.endpoint(endpoint.getPrimaryUri());
        if (storageConnectionString.getAccountName() != null) {
            this.accountName = storageConnectionString.getAccountName();
        }
        StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings();
        if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) {
            this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(),
                authSettings.getAccount().getAccessKey()));
        } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) {
            this.sasToken(authSettings.getSasToken());
        }
        return this;
    }

    /**
     * Sets the {@link HttpClient} to use for sending and receiving requests to and from the service.
     *
     * @param httpClient HttpClient to use for requests.
     * @return the updated ShareClientBuilder object
     */
    public ShareClientBuilder httpClient(HttpClient httpClient) {
        if (this.httpClient != null && httpClient == null) {
            logger.info("'httpClient' is being set to 'null' when it was previously configured.");
        }
        this.httpClient = httpClient;
        return this;
    }

    /**
     * Adds a pipeline policy to apply on each request sent. The policy will be added after the
     * retry policy. If the method is called multiple times, all policies will be added and their
     * order preserved.
     *
     * @param pipelinePolicy a pipeline policy
     * @return the updated ShareClientBuilder object
     * @throws NullPointerException If {@code pipelinePolicy} is {@code null}.
     */
    public ShareClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) {
        this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null"));
        return this;
    }

    /**
     * Sets the {@link HttpLogOptions} for service requests.
     *
     * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses.
     * @return the updated ShareClientBuilder object
     * @throws NullPointerException If {@code logOptions} is {@code null}.
     */
    public ShareClientBuilder httpLogOptions(HttpLogOptions logOptions) {
        this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null.");
        return this;
    }

    /**
     * Gets the default log options with Storage headers and query parameters.
     *
     * @return the default log options.
     */
    public static HttpLogOptions getDefaultHttpLogOptions() {
        return BuilderHelper.getDefaultHttpLogOptions();
    }

    /**
     * Sets the configuration object used to retrieve environment configuration values during
     * building of the client.
     *
     * @param configuration Configuration store used to retrieve environment configurations.
     * @return the updated ShareClientBuilder object
     */
    public ShareClientBuilder configuration(Configuration configuration) {
        this.configuration = configuration;
        return this;
    }

    /**
     * Sets the request retry options for all the requests made through the client.
     *
     * @param retryOptions The options used to configure retry behavior.
     * @return the updated ShareClientBuilder object
     * @throws NullPointerException If {@code retryOptions} is {@code null}.
     */
    public ShareClientBuilder retryOptions(RequestRetryOptions retryOptions) {
        this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null.");
        return this;
    }

    /**
     * Sets the {@link HttpPipeline} to use for the service client.
     *
     * <p>If {@code pipeline} is set, all other settings are ignored, aside from
     * {@link #endpoint(String) endpoint}.</p>
     *
     * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses.
     * @return the updated ShareClientBuilder object
     */
    public ShareClientBuilder pipeline(HttpPipeline httpPipeline) {
        if (this.httpPipeline != null && httpPipeline == null) {
            logger.info("HttpPipeline is being set to 'null' when it was previously configured.");
        }
        this.httpPipeline = httpPipeline;
        return this;
    }

    /**
     * Sets the {@link ShareServiceVersion} that is used when making API requests.
     * <p>
     * If a service version is not provided, the service version that will be used will be the
     * latest known service version based on the version of the client library being used. If no
     * service version is specified, updating to a newer version the client library will have the
     * result of potentially moving to a newer service version.
     *
     * @param version {@link ShareServiceVersion} of the service to be used when making requests.
     * @return the updated ShareClientBuilder object
     */
    public ShareClientBuilder serviceVersion(ShareServiceVersion version) {
        this.version = version;
        return this;
    }
}
Created an issue here https://github.com/Azure/azure-sdk-for-java/issues/6604
public ShareClientBuilder endpoint(String endpoint) { try { URL fullUrl = new URL(endpoint); this.endpoint = fullUrl.getProtocol() + ": this.accountName = BuilderHelper.getAccountName(fullUrl); String[] pathSegments = fullUrl.getPath().split("/"); int length = pathSegments.length; if (length > 3) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot accept a URL to a file or directory to construct a file share client")); } this.shareName = length >= 2 ? pathSegments[1] : this.shareName; String sasToken = new CommonSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(fullUrl.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage File Service endpoint url is malformed.")); } return this; }
public ShareClientBuilder endpoint(String endpoint) { try { URL fullUrl = new URL(endpoint); this.endpoint = fullUrl.getProtocol() + ": this.accountName = BuilderHelper.getAccountName(fullUrl); String[] pathSegments = fullUrl.getPath().split("/"); int length = pathSegments.length; if (length > 3) { throw logger.logExceptionAsError(new IllegalArgumentException( "Cannot accept a URL to a file or directory to construct a file share client")); } this.shareName = length >= 2 ? pathSegments[1] : this.shareName; String sasToken = new CommonSasQueryParameters( StorageImplUtils.parseQueryStringSplitValues(fullUrl.getQuery()), false).encode(); if (!CoreUtils.isNullOrEmpty(sasToken)) { this.sasToken(sasToken); } } catch (MalformedURLException ex) { throw logger.logExceptionAsError( new IllegalArgumentException("The Azure Storage File Service endpoint url is malformed.")); } return this; }
class ShareClientBuilder { private final ClientLogger logger = new ClientLogger(ShareClientBuilder.class); private String endpoint; private String accountName; private String shareName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private ShareServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link ShareClient ShareClients} and {@link * ShareAsyncClient ShareAsyncClients}. */ public ShareClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * Creates a {@link ShareAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link ShareAsyncClient} is created. * * <p> * If {@link ShareClientBuilder * ShareClientBuilder * builder settings are ignored. * </p> * * @return A ShareAsyncClient with the options set from the builder. * @throws NullPointerException If {@code shareName} is {@code null}. * @throws IllegalArgumentException If neither a {@link StorageSharedKeyCredential} or * {@link */ public ShareAsyncClient buildAsyncClient() { Objects.requireNonNull(shareName, "'shareName' cannot be null."); ShareServiceVersion serviceVersion = version != null ? version : ShareServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? 
httpPipeline : BuilderHelper.buildPipeline(() -> { if (storageSharedKeyCredential != null) { return new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (sasTokenCredential != null) { return new SasTokenCredentialPolicy(sasTokenCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Credentials are required for authorization")); } }, retryOptions, logOptions, httpClient, additionalPolicies, configuration); AzureFileStorageImpl azureFileStorage = new AzureFileStorageBuilder() .url(endpoint) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); return new ShareAsyncClient(azureFileStorage, shareName, snapshot, accountName, serviceVersion); } /** * Creates a {@link ShareClient} based on options set in the builder. Every time {@code buildClient()} is called a * new instance of {@link ShareClient} is created. * * <p> * If {@link ShareClientBuilder * ShareClientBuilder * builder settings are ignored. * </p> * * @return A ShareClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} or {@code shareName} is {@code null}. * @throws IllegalStateException If neither a {@link StorageSharedKeyCredential} * or {@link */ public ShareClient buildClient() { return new ShareClient(buildAsyncClient()); } /** * Sets the endpoint for the Azure Storage File instance that the client will interact with. * * <p>The first path segment, if the endpoint contains path segments, will be assumed to be the name of the share * that the client will interact with.</p> * * <p>Query parameters of the endpoint will be parsed in an attempt to generate a SAS token to authenticate * requests sent to the service.</p> * * @param endpoint The URL of the Azure Storage File instance to send service requests to and receive responses * from. 
* @return the updated ShareClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL */ /** * Sets the share that the constructed clients will interact with * * @param shareName Name of the share * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code shareName} is {@code null}. */ public ShareClientBuilder shareName(String shareName) { this.shareName = Objects.requireNonNull(shareName, "'shareName' cannot be null."); return this; } /** * Sets the snapshot that the constructed clients will interact with. This snapshot must be linked to the share that * has been specified in the builder. * * @param snapshot Identifier of the snapshot * @return the updated ShareClientBuilder object */ public ShareClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated ShareClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public ShareClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated ShareClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public ShareClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. 
* @return the updated ShareClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. */ public ShareClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getFileEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive file service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated ShareClientBuilder object */ public ShareClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. The policy will be added after the retry policy. If * the method is called multiple times, all policies will be added and their order preserved. 
* * @param pipelinePolicy a pipeline policy * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public ShareClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public ShareClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default log options with Storage headers and query parameters. * * @return the default log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated ShareClientBuilder object */ public ShareClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public ShareClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated ShareClientBuilder object */ public ShareClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link ShareServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ShareServiceVersion} of the service to be used when making requests. * @return the updated ShareClientBuilder object */ public ShareClientBuilder serviceVersion(ShareServiceVersion version) { this.version = version; return this; } }
class ShareClientBuilder { private final ClientLogger logger = new ClientLogger(ShareClientBuilder.class); private String endpoint; private String accountName; private String shareName; private String snapshot; private StorageSharedKeyCredential storageSharedKeyCredential; private SasTokenCredential sasTokenCredential; private HttpClient httpClient; private final List<HttpPipelinePolicy> additionalPolicies = new ArrayList<>(); private HttpLogOptions logOptions; private RequestRetryOptions retryOptions = new RequestRetryOptions(); private HttpPipeline httpPipeline; private Configuration configuration; private ShareServiceVersion version; /** * Creates a builder instance that is able to configure and construct {@link ShareClient ShareClients} and {@link * ShareAsyncClient ShareAsyncClients}. */ public ShareClientBuilder() { logOptions = getDefaultHttpLogOptions(); } /** * Creates a {@link ShareAsyncClient} based on options set in the builder. Every time {@code buildAsyncClient()} is * called a new instance of {@link ShareAsyncClient} is created. * * <p> * If {@link ShareClientBuilder * ShareClientBuilder * builder settings are ignored. * </p> * * @return A ShareAsyncClient with the options set from the builder. * @throws NullPointerException If {@code shareName} is {@code null}. * @throws IllegalArgumentException If neither a {@link StorageSharedKeyCredential} or * {@link */ public ShareAsyncClient buildAsyncClient() { Objects.requireNonNull(shareName, "'shareName' cannot be null."); ShareServiceVersion serviceVersion = version != null ? version : ShareServiceVersion.getLatest(); HttpPipeline pipeline = (httpPipeline != null) ? 
httpPipeline : BuilderHelper.buildPipeline(() -> { if (storageSharedKeyCredential != null) { return new StorageSharedKeyCredentialPolicy(storageSharedKeyCredential); } else if (sasTokenCredential != null) { return new SasTokenCredentialPolicy(sasTokenCredential); } else { throw logger.logExceptionAsError( new IllegalArgumentException("Credentials are required for authorization")); } }, retryOptions, logOptions, httpClient, additionalPolicies, configuration); AzureFileStorageImpl azureFileStorage = new AzureFileStorageBuilder() .url(endpoint) .pipeline(pipeline) .version(serviceVersion.getVersion()) .build(); return new ShareAsyncClient(azureFileStorage, shareName, snapshot, accountName, serviceVersion); } /** * Creates a {@link ShareClient} based on options set in the builder. Every time {@code buildClient()} is called a * new instance of {@link ShareClient} is created. * * <p> * If {@link ShareClientBuilder * ShareClientBuilder * builder settings are ignored. * </p> * * @return A ShareClient with the options set from the builder. * @throws NullPointerException If {@code endpoint} or {@code shareName} is {@code null}. * @throws IllegalStateException If neither a {@link StorageSharedKeyCredential} * or {@link */ public ShareClient buildClient() { return new ShareClient(buildAsyncClient()); } /** * Sets the endpoint for the Azure Storage File instance that the client will interact with. * * <p>The first path segment, if the endpoint contains path segments, will be assumed to be the name of the share * that the client will interact with.</p> * * <p>Query parameters of the endpoint will be parsed in an attempt to generate a SAS token to authenticate * requests sent to the service.</p> * * @param endpoint The URL of the Azure Storage File instance to send service requests to and receive responses * from. 
* @return the updated ShareClientBuilder object * @throws IllegalArgumentException If {@code endpoint} is {@code null} or is an invalid URL */ /** * Sets the share that the constructed clients will interact with * * @param shareName Name of the share * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code shareName} is {@code null}. */ public ShareClientBuilder shareName(String shareName) { this.shareName = Objects.requireNonNull(shareName, "'shareName' cannot be null."); return this; } /** * Sets the snapshot that the constructed clients will interact with. This snapshot must be linked to the share that * has been specified in the builder. * * @param snapshot Identifier of the snapshot * @return the updated ShareClientBuilder object */ public ShareClientBuilder snapshot(String snapshot) { this.snapshot = snapshot; return this; } /** * Sets the {@link StorageSharedKeyCredential} used to authorize requests sent to the service. * * @param credential The credential to use for authenticating request. * @return the updated ShareClientBuilder * @throws NullPointerException If {@code credential} is {@code null}. */ public ShareClientBuilder credential(StorageSharedKeyCredential credential) { this.storageSharedKeyCredential = Objects.requireNonNull(credential, "'credential' cannot be null."); this.sasTokenCredential = null; return this; } /** * Sets the SAS token used to authorize requests sent to the service. * * @param sasToken The SAS token to use for authenticating requests. * @return the updated ShareClientBuilder * @throws NullPointerException If {@code sasToken} is {@code null}. */ public ShareClientBuilder sasToken(String sasToken) { this.sasTokenCredential = new SasTokenCredential(Objects.requireNonNull(sasToken, "'sasToken' cannot be null.")); this.storageSharedKeyCredential = null; return this; } /** * Sets the connection string to connect to the service. * * @param connectionString Connection string of the storage account. 
* @return the updated ShareClientBuilder * @throws IllegalArgumentException If {@code connectionString} in invalid. * @throws NullPointerException If {@code connectionString} is {@code null}. */ public ShareClientBuilder connectionString(String connectionString) { StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, logger); StorageEndpoint endpoint = storageConnectionString.getFileEndpoint(); if (endpoint == null || endpoint.getPrimaryUri() == null) { throw logger .logExceptionAsError(new IllegalArgumentException( "connectionString missing required settings to derive file service endpoint.")); } this.endpoint(endpoint.getPrimaryUri()); if (storageConnectionString.getAccountName() != null) { this.accountName = storageConnectionString.getAccountName(); } StorageAuthenticationSettings authSettings = storageConnectionString.getStorageAuthSettings(); if (authSettings.getType() == StorageAuthenticationSettings.Type.ACCOUNT_NAME_KEY) { this.credential(new StorageSharedKeyCredential(authSettings.getAccount().getName(), authSettings.getAccount().getAccessKey())); } else if (authSettings.getType() == StorageAuthenticationSettings.Type.SAS_TOKEN) { this.sasToken(authSettings.getSasToken()); } return this; } /** * Sets the {@link HttpClient} to use for sending a receiving requests to and from the service. * * @param httpClient HttpClient to use for requests. * @return the updated ShareClientBuilder object */ public ShareClientBuilder httpClient(HttpClient httpClient) { if (this.httpClient != null && httpClient == null) { logger.info("'httpClient' is being set to 'null' when it was previously configured."); } this.httpClient = httpClient; return this; } /** * Adds a pipeline policy to apply on each request sent. The policy will be added after the retry policy. If * the method is called multiple times, all policies will be added and their order preserved. 
* * @param pipelinePolicy a pipeline policy * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code pipelinePolicy} is {@code null}. */ public ShareClientBuilder addPolicy(HttpPipelinePolicy pipelinePolicy) { this.additionalPolicies.add(Objects.requireNonNull(pipelinePolicy, "'pipelinePolicy' cannot be null")); return this; } /** * Sets the {@link HttpLogOptions} for service requests. * * @param logOptions The logging configuration to use when sending and receiving HTTP requests/responses. * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code logOptions} is {@code null}. */ public ShareClientBuilder httpLogOptions(HttpLogOptions logOptions) { this.logOptions = Objects.requireNonNull(logOptions, "'logOptions' cannot be null."); return this; } /** * Gets the default log options with Storage headers and query parameters. * * @return the default log options. */ public static HttpLogOptions getDefaultHttpLogOptions() { return BuilderHelper.getDefaultHttpLogOptions(); } /** * Sets the configuration object used to retrieve environment configuration values during building of the client. * * @param configuration Configuration store used to retrieve environment configurations. * @return the updated ShareClientBuilder object */ public ShareClientBuilder configuration(Configuration configuration) { this.configuration = configuration; return this; } /** * Sets the request retry options for all the requests made through the client. * * @param retryOptions The options used to configure retry behavior. * @return the updated ShareClientBuilder object * @throws NullPointerException If {@code retryOptions} is {@code null}. */ public ShareClientBuilder retryOptions(RequestRetryOptions retryOptions) { this.retryOptions = Objects.requireNonNull(retryOptions, "'retryOptions' cannot be null."); return this; } /** * Sets the {@link HttpPipeline} to use for the service client. 
* * If {@code pipeline} is set, all other settings are ignored, aside from {@link * * @param httpPipeline HttpPipeline to use for sending service requests and receiving responses. * @return the updated ShareClientBuilder object */ public ShareClientBuilder pipeline(HttpPipeline httpPipeline) { if (this.httpPipeline != null && httpPipeline == null) { logger.info("HttpPipeline is being set to 'null' when it was previously configured."); } this.httpPipeline = httpPipeline; return this; } /** * Sets the {@link ShareServiceVersion} that is used when making API requests. * <p> * If a service version is not provided, the service version that will be used will be the latest known service * version based on the version of the client library being used. If no service version is specified, updating to a * newer version the client library will have the result of potentially moving to a newer service version. * * @param version {@link ShareServiceVersion} of the service to be used when making requests. * @return the updated ShareClientBuilder object */ public ShareClientBuilder serviceVersion(ShareServiceVersion version) { this.version = version; return this; } }
Can you add some assertions here to test the hypothesis?
public void clientProvidedMultipleHeaderTest() throws Exception { String customRequestId = "request-id-value"; final HttpHeaders headers = new HttpHeaders(); headers.put("x-ms-client-request-id", customRequestId); headers.put("my-header1", "my-header1-value"); headers.put("my-header2", "my-header2-value"); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals(request.getHeaders().getValue("x-ms-client-request-id"), customRequestId); Assertions.assertEquals(request.getHeaders().getValue("my-header1"), "my-header1-value"); Assertions.assertEquals(request.getHeaders().getValue("my-header2"), "my-header2-value"); return Mono.just(mockResponse); } }) .policies(new RequestIdPolicy()) .policies(new AddHeadersFromContextPolicy()) .build(); pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: }
pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http:
public void clientProvidedMultipleHeaderTest() throws Exception { String customRequestId = "request-id-value"; final HttpHeaders headers = new HttpHeaders(); headers.put("x-ms-client-request-id", customRequestId); headers.put("my-header1", "my-header1-value"); headers.put("my-header2", "my-header2-value"); final HttpPipeline pipeline = new HttpPipelineBuilder() .httpClient(new NoOpHttpClient() { @Override public Mono<HttpResponse> send(HttpRequest request) { Assertions.assertEquals(request.getHeaders().getValue("x-ms-client-request-id"), customRequestId); Assertions.assertEquals(request.getHeaders().getValue("my-header1"), "my-header1-value"); Assertions.assertEquals(request.getHeaders().getValue("my-header2"), "my-header2-value"); return Mono.just(mockResponse); } }) .policies(new RequestIdPolicy()) .policies(new AddHeadersFromContextPolicy()) .build(); pipeline.send(new HttpRequest(HttpMethod.GET, new URL("http: }
class AddHeadersFromContextPolicyTest { private final HttpResponse mockResponse = new HttpResponse(null) { @Override public int getStatusCode() { return 500; } @Override public String getHeaderValue(String name) { return null; } @Override public HttpHeaders getHeaders() { return new HttpHeaders(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } }; @Test }
class AddHeadersFromContextPolicyTest { private final HttpResponse mockResponse = new HttpResponse(null) { @Override public int getStatusCode() { return 500; } @Override public String getHeaderValue(String name) { return null; } @Override public HttpHeaders getHeaders() { return new HttpHeaders(); } @Override public Mono<byte[]> getBodyAsByteArray() { return Mono.empty(); } @Override public Flux<ByteBuffer> getBody() { return Flux.empty(); } @Override public Mono<String> getBodyAsString() { return Mono.empty(); } @Override public Mono<String> getBodyAsString(Charset charset) { return Mono.empty(); } }; @Test }
This could be replaced with ```java context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY).ifPresent(headers -> { // for loop over headers }); ```
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { Optional<Object> customHttpHeadersObject = context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY); if (customHttpHeadersObject.isPresent() && customHttpHeadersObject.get() instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) customHttpHeadersObject.get(); for (HttpHeader httpHeader : customHttpHeaders) { if (!Objects.isNull(httpHeader.getName()) && !Objects.isNull(httpHeader.getValue())) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } } } return next.process(); }
HttpHeaders customHttpHeaders = (HttpHeaders) customHttpHeadersObject.get();
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY).ifPresent(headers -> { if (headers instanceof HttpHeaders) { HttpHeaders customHttpHeaders = (HttpHeaders) headers; for (HttpHeader httpHeader : customHttpHeaders) { if (!Objects.isNull(httpHeader.getName()) && !Objects.isNull(httpHeader.getValue())) { context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue()); } } } }); return next.process(); }
class AddHeadersFromContextPolicy implements HttpPipelinePolicy { @Override }
class AddHeadersFromContextPolicy implements HttpPipelinePolicy { /**Key used to override headers in HttpRequest. The Value for this key should be {@link HttpHeaders}.*/ public static final String AZURE_REQUEST_HTTP_HEADERS_KEY = "azure-http-headers-key"; @Override }
The variable is not named `messageIdHeaderName`.
public RequestIdPolicy(String requestIdHeaderName) { this.requestIdHeaderName = Objects.requireNonNull(requestIdHeaderName, "messageIdHeaderName can not be null."); }
"messageIdHeaderName can not be null.");
public RequestIdPolicy(String requestIdHeaderName) { this.requestIdHeaderName = Objects.requireNonNull(requestIdHeaderName, "requestIdHeaderName can not be null."); }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final String requestIdHeaderName; /** * Creates {@link RequestIdPolicy} with provided {@code requestIdHeaderName}. * @param requestIdHeaderName to be used to set in {@link HttpRequest}. */ /** * Creates default {@link RequestIdPolicy} with default header name 'x-ms-client-request-id'. */ public RequestIdPolicy() { requestIdHeaderName = REQUEST_ID_HEADER; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String requestId = context.getHttpRequest().getHeaders().getValue(requestIdHeaderName); if (requestId == null) { context.getHttpRequest().getHeaders().put(requestIdHeaderName, UUID.randomUUID().toString()); } return next.process(); } }
class RequestIdPolicy implements HttpPipelinePolicy { private static final String REQUEST_ID_HEADER = "x-ms-client-request-id"; private final String requestIdHeaderName; /** * Creates {@link RequestIdPolicy} with provided {@code requestIdHeaderName}. * @param requestIdHeaderName to be used to set in {@link HttpRequest}. */ /** * Creates default {@link RequestIdPolicy} with default header name 'x-ms-client-request-id'. */ public RequestIdPolicy() { requestIdHeaderName = REQUEST_ID_HEADER; } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { String requestId = context.getHttpRequest().getHeaders().getValue(requestIdHeaderName); if (requestId == null) { context.getHttpRequest().getHeaders().put(requestIdHeaderName, UUID.randomUUID().toString()); } return next.process(); } }
Removed all `String.format()`s, now using `StringBuilder.append` for all additions to the log message.
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append(String.format("--> %s %s%n", request.getHttpMethod(), getRedactedUrl(request.getUrl()))); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append(String.format("(empty body)%n--> END %s%n", request.getHttpMethod())); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (bodyIsPrintable(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n", contentLength, new String(bytes, StandardCharsets.UTF_8), request.getHttpMethod())) .toString())); } else { requestLogMessage.append(String.format("%d-byte body: (content not logged)%n--> END %s%n", contentLength, request.getHttpMethod())); } } } return requestLoggingMono.doOnNext(logger::info); }
Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n",
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final String APPLICATION_OCTET_STREAM = "application/octet-stream"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJSON If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJSON) { this.prettyPrintJSON = prettyPrintJSON; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? 
"unknown-length" : contentLengthString + "-byte"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append(String.format("<-- %d %s (%d ms, %s body)%n", response.getStatusCode(), getRedactedUrl(response.getRequest().getUrl()), tookMs, bodySize)); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (bodyIsPrintable(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> Mono.just(responseLogMessage.append(String.format("Response body:%n%s%n<-- END HTTP", prettyPrintIfNeeded(logger, contentTypeHeader, body))) .toString())) .switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { 
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith("application/json") || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; try { contentLength = Long.parseLong(headers.getValue("content-length")); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } private boolean bodyIsPrintable(String contentTypeHeader, long contentLength) { return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. */ /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. * @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. 
*/ private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } /* * Determines if logging should be skipped. 
* * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. */ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. 
* @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. */ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. 
* @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
I am checking `headers instanceof HttpHeaders` to make sure we do not get a ClassCastException if the end user did not set the value under this key as an `HttpHeaders` instance.
/*
 * Copies any custom headers stored in the pipeline call context onto the outgoing request, then
 * forwards the call to the next policy.
 *
 * The context value under AZURE_REQUEST_HTTP_HEADERS_KEY is applied only when it is an HttpHeaders
 * instance; the instanceof check guards against a ClassCastException when a caller stored a value
 * of another type under the key.
 *
 * @param context Pipeline call context carrying the request and user-supplied data.
 * @param next The next policy in the pipeline.
 * @return A Mono containing the HTTP response from the rest of the pipeline.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY).ifPresent(headers -> {
        if (headers instanceof HttpHeaders) {
            HttpHeaders customHttpHeaders = (HttpHeaders) headers;
            for (HttpHeader httpHeader : customHttpHeaders) {
                // Skip malformed entries; a null name or value cannot be put on the request headers.
                // (Plain null comparisons replace !Objects.isNull(...), which is meant for predicate use.)
                if (httpHeader.getName() != null && httpHeader.getValue() != null) {
                    context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue());
                }
            }
        }
    });
    return next.process();
}
if (headers instanceof HttpHeaders) {
/*
 * Copies any custom headers stored in the pipeline call context onto the outgoing request, then
 * forwards the call to the next policy.
 *
 * @param context Pipeline call context carrying the request and user-supplied data.
 * @param next The next policy in the pipeline.
 * @return A Mono containing the HTTP response from the rest of the pipeline.
 */
public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) {
    context.getData(AZURE_REQUEST_HTTP_HEADERS_KEY).ifPresent(headers -> {
        // Only apply the context value when it actually is HttpHeaders; guards against a
        // ClassCastException when a caller stored a value of another type under the key.
        if (headers instanceof HttpHeaders) {
            HttpHeaders customHttpHeaders = (HttpHeaders) headers;
            for (HttpHeader httpHeader : customHttpHeaders) {
                // Skip entries with a null name or value; they cannot be set on the request headers.
                if (!Objects.isNull(httpHeader.getName()) && !Objects.isNull(httpHeader.getValue())) {
                    context.getHttpRequest().getHeaders().put(httpHeader.getName(), httpHeader.getValue());
                }
            }
        }
    });
    return next.process();
}
class AddHeadersFromContextPolicy implements HttpPipelinePolicy { /**Key used to override headers in HttpRequest. The Value for this key should be {@link HttpHeaders}.*/ public static final String AZURE_REQUEST_HTTP_HEADERS_KEY = "azure-http-headers-key"; @Override }
class AddHeadersFromContextPolicy implements HttpPipelinePolicy { /**Key used to override headers in HttpRequest. The Value for this key should be {@link HttpHeaders}.*/ public static final String AZURE_REQUEST_HTTP_HEADERS_KEY = "azure-http-headers-key"; @Override }
Good catch, changed.
/*
 * Logs the HTTP request.
 *
 * NOTE(review): unlike the response logging path, this version appends the body without calling
 * prettyPrintIfNeeded, so JSON request bodies are not pretty printed — confirm whether that is intended.
 *
 * @param logger Logger used to log the request.
 * @param request HTTP request being sent.
 * @return A Mono emitting the message that was logged, or an empty Mono when logging is disabled.
 */
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) {
    /*
     * Logging is either disabled or the logging level is above information (warning or error), this will result
     * in nothing being logged so perform a no-op.
     */
    int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric();
    if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) {
        return Mono.empty();
    }

    StringBuilder requestLogMessage = new StringBuilder();
    if (httpLogDetailLevel.shouldLogUrl()) {
        // "--> METHOD url" line; query parameters are redacted based on policy configuration.
        requestLogMessage.append(String.format("--> %s %s%n", request.getHttpMethod(),
            getRedactedUrl(request.getUrl())));
    }

    addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel);

    // Deferred so toString runs only after the appends below; the branches mutate the same builder.
    Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString()));

    if (httpLogDetailLevel.shouldLogBody()) {
        if (request.getBody() == null) {
            requestLogMessage.append(String.format("(empty body)%n--> END %s%n", request.getHttpMethod()));
        } else {
            String contentType = request.getHeaders().getValue("Content-Type");
            long contentLength = getContentLength(logger, request.getHeaders());

            if (bodyIsPrintable(contentType, contentLength)) {
                // Collect the body bytes asynchronously and append them as UTF-8 text.
                requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes ->
                    Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n",
                        contentLength, new String(bytes, StandardCharsets.UTF_8), request.getHttpMethod()))
                        .toString()));
            } else {
                // Body is octet-stream, empty, or too large to log (see bodyIsPrintable).
                requestLogMessage.append(String.format("%d-byte body: (content not logged)%n--> END %s%n",
                    contentLength, request.getHttpMethod()));
            }
        }
    }

    return requestLoggingMono.doOnNext(logger::info);
}
|| numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) {
/*
 * Logs the HTTP request.
 *
 * @param logger Logger used to log the request.
 * @param request HTTP request being sent.
 * @return A Mono emitting the message that was logged, or an empty Mono when logging is skipped.
 */
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) {
    // Skip all work when the environment log level is above informational (see shouldLoggingBeSkipped).
    int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric();
    if (shouldLoggingBeSkipped(numericLogLevel)) {
        return Mono.empty();
    }
    StringBuilder requestLogMessage = new StringBuilder();
    if (httpLogDetailLevel.shouldLogUrl()) {
        // "--> METHOD url" line; query parameters are redacted based on policy configuration.
        requestLogMessage.append("--> ")
            .append(request.getHttpMethod())
            .append(" ")
            .append(getRedactedUrl(request.getUrl()))
            .append(System.lineSeparator());
    }
    addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel);
    // Deferred so toString runs only after the appends below; the branches mutate the same builder.
    Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString()));
    if (httpLogDetailLevel.shouldLogBody()) {
        if (request.getBody() == null) {
            requestLogMessage.append("(empty body)")
                .append(System.lineSeparator())
                .append("--> END ")
                .append(request.getHttpMethod())
                .append(System.lineSeparator());
        } else {
            String contentType = request.getHeaders().getValue("Content-Type");
            long contentLength = getContentLength(logger, request.getHeaders());
            if (shouldBodyBeLogged(contentType, contentLength)) {
                // Collect the body bytes asynchronously; JSON bodies are pretty printed when enabled.
                requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> {
                    requestLogMessage.append(contentLength)
                        .append("-byte body:")
                        .append(System.lineSeparator())
                        .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8)))
                        .append(System.lineSeparator())
                        .append("--> END ")
                        .append(request.getHttpMethod())
                        .append(System.lineSeparator());
                    return Mono.just(requestLogMessage.toString());
                });
            } else {
                // Body is octet-stream, empty, or too large to log (see shouldBodyBeLogged).
                requestLogMessage.append(contentLength)
                    .append("-byte body: (content not logged)")
                    .append(System.lineSeparator())
                    .append("--> END ")
                    .append(request.getHttpMethod())
                    .append(System.lineSeparator());
            }
        }
    }
    return requestLoggingMono.doOnNext(logger::info);
}
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final String APPLICATION_OCTET_STREAM = "application/octet-stream"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJSON If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJSON) { this.prettyPrintJSON = prettyPrintJSON; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? 
"unknown-length" : contentLengthString + "-byte"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append(String.format("<-- %d %s (%d ms, %s body)%n", response.getStatusCode(), getRedactedUrl(response.getRequest().getUrl()), tookMs, bodySize)); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (bodyIsPrintable(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> Mono.just(responseLogMessage.append(String.format("Response body:%n%s%n<-- END HTTP", prettyPrintIfNeeded(logger, contentTypeHeader, body))) .toString())) .switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { 
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith("application/json") || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; try { contentLength = Long.parseLong(headers.getValue("content-length")); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } private boolean bodyIsPrintable(String contentTypeHeader, long contentLength) { return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. */ /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. * @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. 
*/ private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } /* * Determines if logging should be skipped. 
* * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. */ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. 
* @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. */ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. 
* @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
Avoid using `String.format()` as this code gets executed for every request (if logging is enabled) and the performance of `String.format()` is bad. Also, the environment variable for logging might be set to a lower numeric value but the slf4j log level might be higher which means that all these strings will be created but not really logged.
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append(String.format("--> %s %s%n", request.getHttpMethod(), getRedactedUrl(request.getUrl()))); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append(String.format("(empty body)%n--> END %s%n", request.getHttpMethod())); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (bodyIsPrintable(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n", contentLength, new String(bytes, StandardCharsets.UTF_8), request.getHttpMethod())) .toString())); } else { requestLogMessage.append(String.format("%d-byte body: (content not logged)%n--> END %s%n", contentLength, request.getHttpMethod())); } } } return requestLoggingMono.doOnNext(logger::info); }
Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n",
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final String APPLICATION_OCTET_STREAM = "application/octet-stream"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJSON If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJSON) { this.prettyPrintJSON = prettyPrintJSON; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? 
"unknown-length" : contentLengthString + "-byte"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append(String.format("<-- %d %s (%d ms, %s body)%n", response.getStatusCode(), getRedactedUrl(response.getRequest().getUrl()), tookMs, bodySize)); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (bodyIsPrintable(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> Mono.just(responseLogMessage.append(String.format("Response body:%n%s%n<-- END HTTP", prettyPrintIfNeeded(logger, contentTypeHeader, body))) .toString())) .switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { 
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith("application/json") || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; try { contentLength = Long.parseLong(headers.getValue("content-length")); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } private boolean bodyIsPrintable(String contentTypeHeader, long contentLength) { return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. */ /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. * @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. 
*/ private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } /* * Determines if logging should be skipped. 
* * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. */ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. 
* @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. */ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. 
* @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
Can this condition be simplified by just using `numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()` since `LogLevel.DISABLED` is 5?
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append(String.format("--> %s %s%n", request.getHttpMethod(), getRedactedUrl(request.getUrl()))); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append(String.format("(empty body)%n--> END %s%n", request.getHttpMethod())); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (bodyIsPrintable(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n", contentLength, new String(bytes, StandardCharsets.UTF_8), request.getHttpMethod())) .toString())); } else { requestLogMessage.append(String.format("%d-byte body: (content not logged)%n--> END %s%n", contentLength, request.getHttpMethod())); } } } return requestLoggingMono.doOnNext(logger::info); }
|| numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) {
private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final String APPLICATION_OCTET_STREAM = "application/octet-stream"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJSON If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJSON) { this.prettyPrintJSON = prettyPrintJSON; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? 
"unknown-length" : contentLengthString + "-byte"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append(String.format("<-- %d %s (%d ms, %s body)%n", response.getStatusCode(), getRedactedUrl(response.getRequest().getUrl()), tookMs, bodySize)); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (bodyIsPrintable(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> Mono.just(responseLogMessage.append(String.format("Response body:%n%s%n<-- END HTTP", prettyPrintIfNeeded(logger, contentTypeHeader, body))) .toString())) .switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { 
queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith("application/json") || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; try { contentLength = Long.parseLong(headers.getValue("content-length")); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } private boolean bodyIsPrintable(String contentTypeHeader, long contentLength) { return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. */ /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. * @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. 
*/ private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); } /* * Determines if logging should be skipped. 
* * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. */ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. 
* @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. */ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. 
* @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
This is common code between logRequest() and logResponse(). This can be extracted as a method to reduce duplication.
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length" : contentLengthString + "-byte"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append(String.format("<-- %d %s (%d ms, %s body)%n", response.getStatusCode(), getRedactedUrl(response.getRequest().getUrl()), tookMs, bodySize)); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (bodyIsPrintable(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> Mono.just(responseLogMessage.append(String.format("Response body:%n%s%n<-- END HTTP", prettyPrintIfNeeded(logger, contentTypeHeader, body))) .toString())) .switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
}
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final String APPLICATION_OCTET_STREAM = "application/octet-stream"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJSON If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJSON) { this.prettyPrintJSON = prettyPrintJSON; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. 
*/ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append(String.format("--> %s %s%n", request.getHttpMethod(), getRedactedUrl(request.getUrl()))); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append(String.format("(empty body)%n--> END %s%n", request.getHttpMethod())); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (bodyIsPrintable(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n", contentLength, new String(bytes, StandardCharsets.UTF_8), request.getHttpMethod())) .toString())); } else { requestLogMessage.append(String.format("%d-byte body: (content not logged)%n--> END %s%n", contentLength, request.getHttpMethod())); } } } return requestLoggingMono.doOnNext(logger::info); } private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String 
queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith("application/json") || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; try { contentLength = Long.parseLong(headers.getValue("content-length")); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } private boolean bodyIsPrintable(String contentTypeHeader, long contentLength) { return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. 
*/ private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); } /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. 
* @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. */ /* * Determines if logging should be skipped. * * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. 
*/ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. 
*/ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
Created a method for these.
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. */ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length" : contentLengthString + "-byte"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append(String.format("<-- %d %s (%d ms, %s body)%n", response.getStatusCode(), getRedactedUrl(response.getRequest().getUrl()), tookMs, bodySize)); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (bodyIsPrintable(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> Mono.just(responseLogMessage.append(String.format("Response body:%n%s%n<-- END HTTP", prettyPrintIfNeeded(logger, contentTypeHeader, body))) .toString())) .switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
}
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private static final String APPLICATION_OCTET_STREAM = "application/octet-stream"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJSON If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJSON) { this.prettyPrintJSON = prettyPrintJSON; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(String::toLowerCase) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { /* * Logging is either disabled or the logging level is above information (warning or error), this will result * in nothing being logged so perform a no-op. 
*/ int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (numericLogLevel == LogLevel.DISABLED.toNumeric() || numericLogLevel > LogLevel.INFORMATIONAL.toNumeric()) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append(String.format("--> %s %s%n", request.getHttpMethod(), getRedactedUrl(request.getUrl()))); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append(String.format("(empty body)%n--> END %s%n", request.getHttpMethod())); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (bodyIsPrintable(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> Mono.just(requestLogMessage.append(String.format("%d-byte body:%n%s%n--> END %s%n", contentLength, new String(bytes, StandardCharsets.UTF_8), request.getHttpMethod())) .toString())); } else { requestLogMessage.append(String.format("%d-byte body: (content not logged)%n--> END %s%n", contentLength, request.getHttpMethod())); } } } return requestLoggingMono.doOnNext(logger::info); } private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String 
queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith("application/json") || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; try { contentLength = Long.parseLong(headers.getValue("content-length")); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } private boolean bodyIsPrintable(String contentTypeHeader, long contentLength) { return !APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. 
*/ private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); } /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. 
* @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. */ /* * Determines if logging should be skipped. * * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. 
*/ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. 
*/ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
This line can be moved inside `shouldLoggingBeSkipped()` as well and the method can be parameter-less.
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ").append(getRedactedUrl(response.getRequest().getUrl())) .append(" (").append(tookMs).append(" ms, ") .append(bodySize).append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric();
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJSON = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. 
*/ private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); } /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. 
* @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. */ /* * Determines if logging should be skipped. * * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel <= LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. 
*/ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. 
*/ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. 
*/ private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); } /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. 
* @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. */ /* * Determines if logging should be skipped. * * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. 
*/ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. 
*/ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
`numericLogLevel` is a parameter that is passed into adding headers to the log message, this is done to maintain a consistent logging state while the message is being generated. This will stay for now.
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ").append(getRedactedUrl(response.getRequest().getUrl())) .append(" (").append(tookMs).append(" ms, ") .append(bodySize).append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric();
private Mono<HttpResponse> logResponse(final ClientLogger logger, final HttpResponse response, long startNs) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.just(response); } long tookMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs); String contentLengthString = response.getHeaderValue("Content-Length"); String bodySize = (CoreUtils.isNullOrEmpty(contentLengthString)) ? "unknown-length body" : contentLengthString + "-byte body"; StringBuilder responseLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { responseLogMessage.append("<-- ") .append(response.getStatusCode()) .append(" ") .append(getRedactedUrl(response.getRequest().getUrl())) .append(" (") .append(tookMs) .append(" ms, ") .append(bodySize) .append(")") .append(System.lineSeparator()); } addHeadersToLogMessage(response.getHeaders(), responseLogMessage, numericLogLevel); Mono<String> responseLoggingMono = Mono.defer(() -> Mono.just(responseLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { final String contentTypeHeader = response.getHeaderValue("Content-Type"); if (shouldBodyBeLogged(contentTypeHeader, getContentLength(logger, response.getHeaders()))) { final HttpResponse bufferedResponse = response.buffer(); responseLoggingMono = bufferedResponse.getBodyAsString().flatMap(body -> { responseLogMessage.append("Response body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentTypeHeader, body)) .append(System.lineSeparator()) .append("<-- END HTTP"); return Mono.just(responseLogMessage.toString()); }).switchIfEmpty(responseLoggingMono); } else { responseLogMessage.append("(body content not logged)") .append(System.lineSeparator()) .append("<-- END HTTP"); } } else { responseLogMessage.append("<-- END HTTP"); } return responseLoggingMono.doOnNext(logger::info).thenReturn(response); }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJSON; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJSON = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. 
*/ private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); } /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. 
* @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. */ /* * Determines if logging should be skipped. * * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel <= LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. 
*/ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. 
*/ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJSON && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
class HttpLoggingPolicy implements HttpPipelinePolicy { private static final ObjectMapper PRETTY_PRINTER = new ObjectMapper().enable(SerializationFeature.INDENT_OUTPUT); private static final int MAX_BODY_LOG_SIZE = 1024 * 16; private static final String REDACTED_PLACEHOLDER = "REDACTED"; private final HttpLogDetailLevel httpLogDetailLevel; private final Set<String> allowedHeaderNames; private final Set<String> allowedQueryParameterNames; private final boolean prettyPrintJson; /** * Creates an HttpLoggingPolicy with the given log configurations. * * @param httpLogOptions The HTTP logging configurations. */ public HttpLoggingPolicy(HttpLogOptions httpLogOptions) { this(httpLogOptions, false); } /** * Creates an HttpLoggingPolicy with the given log configuration and pretty printing setting. * * @param httpLogOptions The HTTP logging configuration options. * @param prettyPrintJson If true, pretty prints JSON message bodies when logging. If the detailLevel does not * include body logging, this flag does nothing. 
*/ private HttpLoggingPolicy(HttpLogOptions httpLogOptions, boolean prettyPrintJson) { this.prettyPrintJson = prettyPrintJson; if (httpLogOptions == null) { this.httpLogDetailLevel = HttpLogDetailLevel.NONE; this.allowedHeaderNames = Collections.emptySet(); this.allowedQueryParameterNames = Collections.emptySet(); } else { this.httpLogDetailLevel = httpLogOptions.getLogLevel(); this.allowedHeaderNames = httpLogOptions.getAllowedHeaderNames() .stream() .map(headerName -> headerName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); this.allowedQueryParameterNames = httpLogOptions.getAllowedQueryParamNames() .stream() .map(queryParamName -> queryParamName.toLowerCase(Locale.ROOT)) .collect(Collectors.toSet()); } } @Override public Mono<HttpResponse> process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { if (httpLogDetailLevel == HttpLogDetailLevel.NONE) { return next.process(); } final ClientLogger logger = new ClientLogger((String) context.getData("caller-method").orElse("")); final long startNs = System.nanoTime(); return logRequest(logger, context.getHttpRequest()) .then(next.process()) .flatMap(response -> logResponse(logger, response, startNs)) .doOnError(throwable -> logger.warning("<-- HTTP FAILED: ", throwable)); } /* * Logs the HTTP request. * * @param logger Logger used to log the request. * @param request HTTP request being sent to Azure. * @return A Mono which will emit the string to log. 
*/ private Mono<String> logRequest(final ClientLogger logger, final HttpRequest request) { int numericLogLevel = LoggingUtil.getEnvironmentLoggingLevel().toNumeric(); if (shouldLoggingBeSkipped(numericLogLevel)) { return Mono.empty(); } StringBuilder requestLogMessage = new StringBuilder(); if (httpLogDetailLevel.shouldLogUrl()) { requestLogMessage.append("--> ") .append(request.getHttpMethod()) .append(" ") .append(getRedactedUrl(request.getUrl())) .append(System.lineSeparator()); } addHeadersToLogMessage(request.getHeaders(), requestLogMessage, numericLogLevel); Mono<String> requestLoggingMono = Mono.defer(() -> Mono.just(requestLogMessage.toString())); if (httpLogDetailLevel.shouldLogBody()) { if (request.getBody() == null) { requestLogMessage.append("(empty body)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } else { String contentType = request.getHeaders().getValue("Content-Type"); long contentLength = getContentLength(logger, request.getHeaders()); if (shouldBodyBeLogged(contentType, contentLength)) { requestLoggingMono = FluxUtil.collectBytesInByteBufferStream(request.getBody()).flatMap(bytes -> { requestLogMessage.append(contentLength) .append("-byte body:") .append(System.lineSeparator()) .append(prettyPrintIfNeeded(logger, contentType, new String(bytes, StandardCharsets.UTF_8))) .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); return Mono.just(requestLogMessage.toString()); }); } else { requestLogMessage.append(contentLength) .append("-byte body: (content not logged)") .append(System.lineSeparator()) .append("--> END ") .append(request.getHttpMethod()) .append(System.lineSeparator()); } } } return requestLoggingMono.doOnNext(logger::info); } /* * Logs thr HTTP response. * * @param logger Logger used to log the response. * @param response HTTP response returned from Azure. 
* @param startNs Nanosecond representation of when the request was sent. * @return A Mono containing the HTTP response. */ /* * Determines if logging should be skipped. * * <p>Logging is skipped if the environment log level doesn't support logging at the informational or verbose level. * All logging in this policy occurs at the information level.</p> * * @param environmentLogLevel Log level configured in the environment at the time logging begins. * @return A flag indicating if logging should be skipped. */ private boolean shouldLoggingBeSkipped(int environmentLogLevel) { return environmentLogLevel > LogLevel.INFORMATIONAL.toNumeric(); } /* * Generates the redacted URL for logging. * * @param url URL where the request is being sent. * @return A URL with query parameters redacted based on configurations in this policy. */ private String getRedactedUrl(URL url) { return UrlBuilder.parse(url) .setQuery(getAllowedQueryString(url.getQuery())) .toString(); } /* * Generates the logging safe query parameters string. * * @param queryString Query parameter string from the request URL. * @return A query parameter string redacted based on the configurations in this policy. 
*/ private String getAllowedQueryString(String queryString) { if (CoreUtils.isNullOrEmpty(queryString)) { return ""; } StringBuilder queryStringBuilder = new StringBuilder(); String[] queryParams = queryString.split("&"); for (String queryParam : queryParams) { if (queryStringBuilder.length() > 0) { queryStringBuilder.append("&"); } String[] queryPair = queryParam.split("=", 2); if (queryPair.length == 2) { String queryName = queryPair[0]; if (allowedQueryParameterNames.contains(queryName.toLowerCase(Locale.ROOT))) { queryStringBuilder.append(queryParam); } else { queryStringBuilder.append(queryPair[0]).append("=").append(REDACTED_PLACEHOLDER); } } else { queryStringBuilder.append(queryParam); } } return queryStringBuilder.toString(); } /* * Adds HTTP headers into the StringBuilder that is generating the log message. * * @param headers HTTP headers on the request or response. * @param sb StringBuilder that is generating the log message. * @param logLevel Log level the environment is configured to use. */ private void addHeadersToLogMessage(HttpHeaders headers, StringBuilder sb, int logLevel) { if (!httpLogDetailLevel.shouldLogHeaders() || logLevel > LogLevel.VERBOSE.toNumeric()) { return; } for (HttpHeader header : headers) { String headerName = header.getName(); sb.append(headerName).append(":"); if (allowedHeaderNames.contains(headerName.toLowerCase(Locale.ROOT))) { sb.append(header.getValue()); } else { sb.append(REDACTED_PLACEHOLDER); } sb.append(System.lineSeparator()); } } /* * Determines and attempts to pretty print the body if it is JSON. * * <p>The body is pretty printed if the Content-Type is JSON and the policy is configured to pretty print JSON.</p> * * @param logger Logger used to log a warning if the body fails to pretty print as JSON. * @param contentType Content-Type header. * @param body Body of the request or response. * @return The body pretty printed if it is JSON, otherwise the unmodified body. 
*/ private String prettyPrintIfNeeded(ClientLogger logger, String contentType, String body) { String result = body; if (prettyPrintJson && contentType != null && (contentType.startsWith(ContentType.APPLICATION_JSON) || contentType.startsWith("text/json"))) { try { final Object deserialized = PRETTY_PRINTER.readTree(body); result = PRETTY_PRINTER.writeValueAsString(deserialized); } catch (Exception e) { logger.warning("Failed to pretty print JSON: {}", e.getMessage()); } } return result; } /* * Attempts to retrieve and parse the Content-Length header into a numeric representation. * * @param logger Logger used to log a warning if the Content-Length header is an invalid number. * @param headers HTTP headers that are checked for containing Content-Length. * @return */ private long getContentLength(ClientLogger logger, HttpHeaders headers) { long contentLength = 0; String contentLengthString = headers.getValue("Content-Length"); if (CoreUtils.isNullOrEmpty(contentLengthString)) { return contentLength; } try { contentLength = Long.parseLong(contentLengthString); } catch (NumberFormatException | NullPointerException e) { logger.warning("Could not parse the HTTP header content-length: '{}'.", headers.getValue("content-length"), e); } return contentLength; } /* * Determines if the request or response body should be logged. * * <p>The request or response body is logged if the Content-Type is not "application/octet-stream" and the body * isn't empty and is less than 16KB in size.</p> * * @param contentTypeHeader Content-Type header value. * @param contentLength Content-Length header represented as a numeric. * @return A flag indicating if the request or response body should be logged. */ private boolean shouldBodyBeLogged(String contentTypeHeader, long contentLength) { return !ContentType.APPLICATION_OCTET_STREAM.equalsIgnoreCase(contentTypeHeader) && contentLength != 0 && contentLength < MAX_BODY_LOG_SIZE; } }
Instead of instantiating a new `CloseHandlesInfo` for every element, should the reduction accumulate a plain integer and then wrap the total in a `CloseHandlesInfo`? That way the object is created once instead of once per item emitted by the stream. For example: ```java .stream().map(CloseHandlesInfo::getClosedHandles).reduce(0, Integer::sum) ``` and then pass the resulting sum to `new CloseHandlesInfo(...)`.
/**
 * Force-closes all handles opened on the file, blocking until the operation completes.
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link CloseHandlesInfo} containing the total number of handles that were closed.
 */
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) {
    // Accumulate the closed-handle counts as plain ints and wrap the total in a single
    // CloseHandlesInfo, instead of allocating a new CloseHandlesInfo per page element
    // inside the reduction.
    int closedHandles = new PagedIterable<>(
        shareFileAsyncClient.forceCloseAllHandlesWithOptionalTimeout(timeout, context))
        .stream()
        .mapToInt(CloseHandlesInfo::getClosedHandles)
        .sum();
    return new CloseHandlesInfo(closedHandles);
}
.stream().reduce(new CloseHandlesInfo(0),
/**
 * Force-closes all handles opened on the file, blocking until the operation completes.
 *
 * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
 * concludes a {@link RuntimeException} will be thrown.
 * @param context Additional context that is passed through the Http pipeline during the service call.
 * @return A {@link CloseHandlesInfo} containing the total number of handles that were closed.
 */
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) {
    // Accumulate the closed-handle counts as plain ints and wrap the total in a single
    // CloseHandlesInfo, instead of allocating a new CloseHandlesInfo per page element
    // inside the reduction.
    int closedHandles = new PagedIterable<>(
        shareFileAsyncClient.forceCloseAllHandlesWithOptionalTimeout(timeout, context))
        .stream()
        .mapToInt(CloseHandlesInfo::getClosedHandles)
        .sum();
    return new CloseHandlesInfo(closedHandles);
}
class ShareFileClient { private final ClientLogger logger = new ClientLogger(ShareFileClient.class); private final ShareFileAsyncClient shareFileAsyncClient; /** * Creates a ShareFileClient that wraps a ShareFileAsyncClient and requests. * * @param shareFileAsyncClient ShareFileAsyncClient that is used to send requests */ ShareFileClient(ShareFileAsyncClient shareFileAsyncClient) { this.shareFileAsyncClient = shareFileAsyncClient; } /** * Get the url of the storage file client. * * @return the URL of the storage file client. */ public String getFileUrl() { return shareFileAsyncClient.getFileUrl(); } /** * Gets the service version the client is using. * * @return the service version the client is using. */ public ShareServiceVersion getServiceVersion() { return shareFileAsyncClient.getServiceVersion(); } /** * Opens a file input stream to download the file. * <p> * * @return An <code>InputStream</code> object that represents the stream to use for reading from the file. * @throws ShareStorageException If a storage service error occurred. */ public final StorageFileInputStream openInputStream() { return openInputStream(new ShareFileRange(0)); } /** * Opens a file input stream to download the specified range of the file. * <p> * * @param range {@link ShareFileRange} * @return An <code>InputStream</code> object that represents the stream to use for reading from the file. * @throws ShareStorageException If a storage service error occurred. */ public final StorageFileInputStream openInputStream(ShareFileRange range) { return new StorageFileInputStream(shareFileAsyncClient, range.getStart(), range.getEnd()); } /** * Creates and opens an output stream to write data to the file. If the file already exists on the service, it will * be overwritten. * * @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws ShareStorageException If a storage service error occurred. 
*/ public final StorageFileOutputStream getFileOutputStream() { return getFileOutputStream(0); } /** * Creates and opens an output stream to write data to the file. If the file already exists on the service, it will * be overwritten. * * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @return A {@link StorageFileOutputStream} object used to write data to the file. * @throws ShareStorageException If a storage service error occurred. */ public final StorageFileOutputStream getFileOutputStream(long offset) { return new StorageFileOutputStream(shareFileAsyncClient, offset); } /** * Creates a file in the storage account and returns a response of {@link ShareFileInfo} to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers and metadata.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.create} * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @return The {@link ShareFileInfo file info} * @throws ShareStorageException If the file has already existed, the parent directory does not exist or fileName * is an invalid resource name. */ public ShareFileInfo create(long maxSize) { return createWithResponse(maxSize, null, null, null, null, null, Context.NONE).getValue(); } /** * Creates a file in the storage account and returns a response of ShareFileInfo to interact with it. * * <p><strong>Code Samples</strong></p> * * <p>Create the file with length of 1024 bytes, some headers, file smb properties and metadata.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.createWithResponse * * <p>For more information, see the * <a href="https: * * @param maxSize The maximum size in bytes for the file, up to 1 TiB. * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. 
* @param filePermission The file permission of the file. * @param metadata Optional name-value pairs associated with the file as metadata. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileInfo file info} and the status of creating the file. * @throws ShareStorageException If the directory has already existed, the parent directory does not exist or * directory is an invalid resource name. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. * @see <a href="https: */ public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout, Context context) { Mono<Response<ShareFileInfo>> response = shareFileAsyncClient .createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Copies a blob or file to a destination file within the storage account. * * <p><strong>Code Samples</strong></p> * * <p>Copy file from source getDirectoryUrl to the {@code resourcePath} </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.beginCopy * * <p>For more information, see the * <a href="https: * * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length. * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to the * naming rules. * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one second * is used. * @return A {@link SyncPoller} to poll the progress of copy operation. 
* @see <a href="https: */ public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, Duration pollInterval) { return shareFileAsyncClient.beginCopy(sourceUrl, metadata, pollInterval) .getSyncPoller(); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. */ public void abortCopy(String copyId) { abortCopyWithResponse(copyId, null, Context.NONE); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the status of aborting copy the file. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) { Mono<Response<Void>> response = shareFileAsyncClient.abortCopyWithResponse(copyId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return The properties of the file. */ public ShareFileProperties downloadToFile(String downloadFilePath) { return downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue(); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The response of the file properties. 
*/ public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range, Duration timeout, Context context) { Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.downloadToFileWithResponse(downloadFilePath, range, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} where the downloaded data will be written. * @throws NullPointerException If {@code stream} is {@code null}. */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, Context.NONE); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} where the downloaded data will be written. * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the headers and response status code * @throws NullPointerException If {@code stream} is {@code null}. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range, Boolean rangeGetContentMD5, Duration timeout, Context context) { Objects.requireNonNull(stream, "'stream' cannot be null."); Mono<ShareFileDownloadResponse> download = shareFileAsyncClient.downloadWithResponse(range, rangeGetContentMD5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new ShareFileDownloadResponse(response))); return StorageImplUtils.blockWithOptionalTimeout(download, timeout); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.delete} * * <p>For more information, see the * <a href="https: * * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist. */ public void delete() { deleteWithResponse(null, Context.NONE); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response that only contains headers and response status code * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<Void> deleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = shareFileAsyncClient.deleteWithResponse(context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileProperties Storage file properties} */ public ShareFileProperties getProperties() { return getPropertiesWithResponse(null, Context.NONE).getValue(); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) { Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.getPropertiesWithResponse(context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @return The {@link ShareFileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission) { return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE) .getValue(); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Response containing the {@link ShareFileInfo file info} with headers and status code * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) { Mono<Response<ShareFileInfo>> response = shareFileAsyncClient .setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return The {@link ShareFileMetadataInfo file meta info} * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys */ public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata, null, Context.NONE).getValue(); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout, Context context) { Mono<Response<ShareFileMetadataInfo>> response = shareFileAsyncClient .setMetadataWithResponse(metadata, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the * ShareFileRangeWriteType is set to clear, the value of this header must be set to zero. * @return The {@link ShareFileUploadInfo file upload info} * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public ShareFileUploadInfo upload(InputStream data, long length) { return uploadWithResponse(data, length, 0L, null, Context.NONE).getValue(); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024. 
</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset, Duration timeout, Context context) { return StorageImplUtils.blockWithOptionalTimeout(shareFileAsyncClient.uploadWithResponse(Utility .convertStreamToByteBuffer(data, length, (int) ShareFileAsyncClient.FILE_DEFAULT_BLOCK_SIZE), length, offset, context), timeout); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. 
* @param sourceUrl Specifies the URL of the source file. * @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} */ public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset, String sourceUrl) { return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE) .getValue(); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. * @param sourceUrl Specifies the URL of the source file. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with * headers and response status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset, long sourceOffset, String sourceUrl, Duration timeout, Context context) { Mono<Response<ShareFileUploadRangeFromUrlInfo>> response = shareFileAsyncClient.uploadRangeFromUrlWithResponse( length, destinationOffset, sourceOffset, sourceUrl, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Clears a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link ShareFileUploadInfo file upload info} */ public ShareFileUploadInfo clearRange(long length) { return clearRangeWithResponse(length, 0, null, Context.NONE).getValue(); } /** * Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout, Context context) { Mono<Response<ShareFileUploadInfo>> response = shareFileAsyncClient .clearRangeWithResponse(length, offset, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p>Upload the file from the source file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadFromFile * * <p>For more information, see the * <a href="https: * and * <a href="https: * * @param uploadFilePath The path where store the source file to upload */ public void uploadFromFile(String uploadFilePath) { shareFileAsyncClient.uploadFromFile(uploadFilePath).block(); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileRange ranges} in the files. */ public PagedIterable<ShareFileRange> listRanges() { return listRanges(null, null, null); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges within the file range from 1KB to 2KB.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link ShareFileRange ranges} in the files that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listRangesWithOptionalTimeout(range, timeout, context)); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List all handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles} * * <p>For more information, see the * <a href="https: * * @return {@link HandleItem handles} in the files that satisfy the requirements */ public PagedIterable<HandleItem> listHandles() { return listHandles(null, null, Context.NONE); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List 10 handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles * * <p>For more information, see the * <a href="https: * * @param maxResultsPerPage Optional max number of results returned per page * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link HandleItem handles} in the file that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listHandlesWithOptionalTimeout(maxResultsPerPage, timeout, context)); } /** * Closes a handle on the file at the service. 
This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandle * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @return Information about the closed handles. */ public CloseHandlesInfo forceCloseHandle(String handleId) { return forceCloseHandleWithResponse(handleId, null, Context.NONE).getValue(); } /** * Closes a handle on the file at the service. This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response that contains information about the closed handles, headers and response status code. */ public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) { Mono<Response<CloseHandlesInfo>> response = shareFileAsyncClient .forceCloseHandleWithResponse(handleId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Closes all handles opened on the file at the service. * * <p><strong>Code Samples</strong></p> * * <p>Force close all handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. 
If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Information about the closed handles */ /** * Get snapshot id which attached to {@link ShareFileClient}. Return {@code null} if no snapshot id attached. * * <p><strong>Code Samples</strong></p> * * <p>Get the share snapshot id. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareSnapshotId} * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. */ public String getShareSnapshotId() { return shareFileAsyncClient.getShareSnapshotId(); } /** * Get the share name of file client. * * <p>Get the share name. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareName} * * @return The share name of the file. */ public String getShareName() { return this.shareFileAsyncClient.getShareName(); } /** * Get file path of the client. * * <p>Get the file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getFilePath} * * @return The path of the file. */ public String getFilePath() { return this.shareFileAsyncClient.getFilePath(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return this.shareFileAsyncClient.getAccountName(); } }
class ShareFileClient {
    private final ClientLogger logger = new ClientLogger(ShareFileClient.class);
    private final ShareFileAsyncClient shareFileAsyncClient;

    /**
     * Creates a ShareFileClient that wraps a ShareFileAsyncClient and requests.
     *
     * @param shareFileAsyncClient ShareFileAsyncClient that is used to send requests
     */
    ShareFileClient(ShareFileAsyncClient shareFileAsyncClient) {
        this.shareFileAsyncClient = shareFileAsyncClient;
    }

    /**
     * Get the url of the storage file client.
     *
     * @return the URL of the storage file client.
     */
    public String getFileUrl() {
        return shareFileAsyncClient.getFileUrl();
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public ShareServiceVersion getServiceVersion() {
        return shareFileAsyncClient.getServiceVersion();
    }

    /**
     * Opens a file input stream to download the whole file.
     *
     * @return An {@code InputStream} object that represents the stream to use for reading from the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileInputStream openInputStream() {
        // A range starting at 0 with no explicit end covers the entire file.
        return openInputStream(new ShareFileRange(0));
    }

    /**
     * Opens a file input stream to download the specified range of the file.
     *
     * @param range {@link ShareFileRange}
     * @return An {@code InputStream} object that represents the stream to use for reading from the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileInputStream openInputStream(ShareFileRange range) {
        return new StorageFileInputStream(shareFileAsyncClient, range.getStart(), range.getEnd());
    }

    /**
     * Creates and opens an output stream to write data to the file. If the file already exists on the service,
     * it will be overwritten.
     *
     * @return A {@link StorageFileOutputStream} object used to write data to the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileOutputStream getFileOutputStream() {
        // Offset 0 writes from the start of the file.
        return getFileOutputStream(0);
    }

    /**
     * Creates and opens an output stream to write data to the file starting at the given offset. If the file
     * already exists on the service, it will be overwritten.
     *
     * @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
     * @return A {@link StorageFileOutputStream} object used to write data to the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileOutputStream getFileOutputStream(long offset) {
        return new StorageFileOutputStream(shareFileAsyncClient, offset);
    }

    /**
     * Creates a file in the storage account and returns a {@link ShareFileInfo} to interact with it.
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @return The {@link ShareFileInfo file info}
     * @throws ShareStorageException If the file already exists, the parent directory does not exist or fileName
     * is an invalid resource name.
     */
    public ShareFileInfo create(long maxSize) {
        return createWithResponse(maxSize, null, null, null, null, null, Context.NONE).getValue();
    }

    /**
     * Creates a file in the storage account and returns the full HTTP response.
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @param httpHeaders The user settable file http headers.
     * @param smbProperties The user settable file smb properties.
     * @param filePermission The file permission of the file.
     * @param metadata Optional name-value pairs associated with the file as metadata.
     * @param timeout Optional timeout; if no response is returned before it concludes a
     * {@link RuntimeException} is thrown.
     * @param context Additional context passed through the HTTP pipeline during the service call.
     * @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
     * @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
     * directory is an invalid resource name.
     * @throws RuntimeException If the operation doesn't complete before the timeout concludes.
     */
    public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
        FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
        Context context) {
        // Block on the async call, honoring the optional timeout.
        return StorageImplUtils.blockWithOptionalTimeout(
            shareFileAsyncClient.createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata,
                context), timeout);
    }

    /**
     * Copies a blob or file to a destination file within the storage account.
     *
     * @param sourceUrl The URL of the source file or blob, up to 2 KB in length.
     * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere
     * to the naming rules.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
     * second is used.
     * @return A {@link SyncPoller} to poll the progress of copy operation.
* @see <a href="https: */ public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata, Duration pollInterval) { return shareFileAsyncClient.beginCopy(sourceUrl, metadata, pollInterval) .getSyncPoller(); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopy * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. */ public void abortCopy(String copyId) { abortCopyWithResponse(copyId, null, Context.NONE); } /** * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata. * * <p><strong>Code Samples</strong></p> * * <p>Abort copy file from copy id("someCopyId") </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.abortCopyWithResponse * * <p>For more information, see the * <a href="https: * * @param copyId Specifies the copy id which has copying pending status associate with it. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the status of aborting copy the file. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) { Mono<Response<Void>> response = shareFileAsyncClient.abortCopyWithResponse(copyId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return The properties of the file. */ public ShareFileProperties downloadToFile(String downloadFilePath) { return downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue(); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The response of the file properties. 
*/ public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range, Duration timeout, Context context) { Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.downloadToFileWithResponse(downloadFilePath, range, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} where the downloaded data will be written. * @throws NullPointerException If {@code stream} is {@code null}. */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, Context.NONE); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} where the downloaded data will be written. * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the headers and response status code * @throws NullPointerException If {@code stream} is {@code null}. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range, Boolean rangeGetContentMD5, Duration timeout, Context context) { Objects.requireNonNull(stream, "'stream' cannot be null."); Mono<ShareFileDownloadResponse> download = shareFileAsyncClient.downloadWithResponse(range, rangeGetContentMD5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new ShareFileDownloadResponse(response))); return StorageImplUtils.blockWithOptionalTimeout(download, timeout); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.delete} * * <p>For more information, see the * <a href="https: * * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist. */ public void delete() { deleteWithResponse(null, Context.NONE); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response that only contains headers and response status code * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<Void> deleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = shareFileAsyncClient.deleteWithResponse(context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileProperties Storage file properties} */ public ShareFileProperties getProperties() { return getPropertiesWithResponse(null, Context.NONE).getValue(); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) { Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.getPropertiesWithResponse(context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @return The {@link ShareFileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission) { return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE) .getValue(); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Response containing the {@link ShareFileInfo file info} with headers and status code * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) { Mono<Response<ShareFileInfo>> response = shareFileAsyncClient .setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return The {@link ShareFileMetadataInfo file meta info} * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys */ public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata, null, Context.NONE).getValue(); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout, Context context) { Mono<Response<ShareFileMetadataInfo>> response = shareFileAsyncClient .setMetadataWithResponse(metadata, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the * ShareFileRangeWriteType is set to clear, the value of this header must be set to zero. * @return The {@link ShareFileUploadInfo file upload info} * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public ShareFileUploadInfo upload(InputStream data, long length) { return uploadWithResponse(data, length, 0L, null, Context.NONE).getValue(); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024. 
</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset, Duration timeout, Context context) { return StorageImplUtils.blockWithOptionalTimeout(shareFileAsyncClient.uploadWithResponse(Utility .convertStreamToByteBuffer(data, length, (int) ShareFileAsyncClient.FILE_DEFAULT_BLOCK_SIZE), length, offset, context), timeout); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. 
* @param sourceUrl Specifies the URL of the source file. * @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} */ public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset, String sourceUrl) { return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE) .getValue(); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. * @param sourceUrl Specifies the URL of the source file. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with * headers and response status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset, long sourceOffset, String sourceUrl, Duration timeout, Context context) { Mono<Response<ShareFileUploadRangeFromUrlInfo>> response = shareFileAsyncClient.uploadRangeFromUrlWithResponse( length, destinationOffset, sourceOffset, sourceUrl, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Clears a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link ShareFileUploadInfo file upload info} */ public ShareFileUploadInfo clearRange(long length) { return clearRangeWithResponse(length, 0, null, Context.NONE).getValue(); } /** * Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout, Context context) { Mono<Response<ShareFileUploadInfo>> response = shareFileAsyncClient .clearRangeWithResponse(length, offset, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p>Upload the file from the source file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadFromFile * * <p>For more information, see the * <a href="https: * and * <a href="https: * * @param uploadFilePath The path where store the source file to upload */ public void uploadFromFile(String uploadFilePath) { shareFileAsyncClient.uploadFromFile(uploadFilePath).block(); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileRange ranges} in the files. */ public PagedIterable<ShareFileRange> listRanges() { return listRanges(null, null, null); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges within the file range from 1KB to 2KB.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link ShareFileRange ranges} in the files that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listRangesWithOptionalTimeout(range, timeout, context)); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List all handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles} * * <p>For more information, see the * <a href="https: * * @return {@link HandleItem handles} in the files that satisfy the requirements */ public PagedIterable<HandleItem> listHandles() { return listHandles(null, null, Context.NONE); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List 10 handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles * * <p>For more information, see the * <a href="https: * * @param maxResultsPerPage Optional max number of results returned per page * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link HandleItem handles} in the file that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listHandlesWithOptionalTimeout(maxResultsPerPage, timeout, context)); } /** * Closes a handle on the file at the service. 
This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandle * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @return Information about the closed handles. */ public CloseHandlesInfo forceCloseHandle(String handleId) { return forceCloseHandleWithResponse(handleId, null, Context.NONE).getValue(); } /** * Closes a handle on the file at the service. This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response that contains information about the closed handles, headers and response status code. */ public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) { Mono<Response<CloseHandlesInfo>> response = shareFileAsyncClient .forceCloseHandleWithResponse(handleId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Closes all handles opened on the file at the service. * * <p><strong>Code Samples</strong></p> * * <p>Force close all handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. 
If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Information about the closed handles */ /** * Get snapshot id which attached to {@link ShareFileClient}. Return {@code null} if no snapshot id attached. * * <p><strong>Code Samples</strong></p> * * <p>Get the share snapshot id. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareSnapshotId} * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. */ public String getShareSnapshotId() { return shareFileAsyncClient.getShareSnapshotId(); } /** * Get the share name of file client. * * <p>Get the share name. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareName} * * @return The share name of the file. */ public String getShareName() { return this.shareFileAsyncClient.getShareName(); } /** * Get file path of the client. * * <p>Get the file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getFilePath} * * @return The path of the file. */ public String getFilePath() { return this.shareFileAsyncClient.getFilePath(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return this.shareFileAsyncClient.getAccountName(); } }
NOTE(review): this point was discussed offline; record the agreed resolution here or in the pull-request thread so the decision is not lost.
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.forceCloseAllHandlesWithOptionalTimeout(timeout, context)) .stream().reduce(new CloseHandlesInfo(0), (accu, next) -> new CloseHandlesInfo(accu.getClosedHandles() + next.getClosedHandles())); }
.stream().reduce(new CloseHandlesInfo(0),
public CloseHandlesInfo forceCloseAllHandles(Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.forceCloseAllHandlesWithOptionalTimeout(timeout, context)) .stream().reduce(new CloseHandlesInfo(0), (accu, next) -> new CloseHandlesInfo(accu.getClosedHandles() + next.getClosedHandles())); }
class ShareFileClient {
    private final ClientLogger logger = new ClientLogger(ShareFileClient.class);

    // All synchronous operations delegate to this async client and block.
    private final ShareFileAsyncClient shareFileAsyncClient;

    /**
     * Creates a ShareFileClient that wraps a ShareFileAsyncClient and requests.
     *
     * @param shareFileAsyncClient ShareFileAsyncClient that is used to send requests.
     */
    ShareFileClient(ShareFileAsyncClient shareFileAsyncClient) {
        this.shareFileAsyncClient = shareFileAsyncClient;
    }

    /**
     * Get the url of the storage file client.
     *
     * @return the URL of the storage file client.
     */
    public String getFileUrl() {
        return shareFileAsyncClient.getFileUrl();
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public ShareServiceVersion getServiceVersion() {
        return shareFileAsyncClient.getServiceVersion();
    }

    /**
     * Opens a file input stream to download the file.
     *
     * @return An {@code InputStream} object that represents the stream to use for reading from the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileInputStream openInputStream() {
        return openInputStream(new ShareFileRange(0));
    }

    /**
     * Opens a file input stream to download the specified range of the file.
     *
     * @param range {@link ShareFileRange}
     * @return An {@code InputStream} object that represents the stream to use for reading from the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileInputStream openInputStream(ShareFileRange range) {
        return new StorageFileInputStream(shareFileAsyncClient, range.getStart(), range.getEnd());
    }

    /**
     * Creates and opens an output stream to write data to the file. If the file already exists on the service, it
     * will be overwritten.
     *
     * @return A {@link StorageFileOutputStream} object used to write data to the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileOutputStream getFileOutputStream() {
        return getFileOutputStream(0);
    }

    /**
     * Creates and opens an output stream to write data to the file. If the file already exists on the service, it
     * will be overwritten.
     *
     * @param offset Starting point of the upload range; if {@code null} it will start from the beginning.
     * @return A {@link StorageFileOutputStream} object used to write data to the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileOutputStream getFileOutputStream(long offset) {
        return new StorageFileOutputStream(shareFileAsyncClient, offset);
    }

    /**
     * Creates a file in the storage account and returns a {@link ShareFileInfo} to interact with it.
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @return The {@link ShareFileInfo file info}.
     * @throws ShareStorageException If the file already exists, the parent directory does not exist or fileName
     * is an invalid resource name.
     */
    public ShareFileInfo create(long maxSize) {
        return createWithResponse(maxSize, null, null, null, null, null, Context.NONE).getValue();
    }

    /**
     * Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @param httpHeaders The user settable file http headers.
     * @param smbProperties The user settable file smb properties.
     * @param filePermission The file permission of the file.
     * @param metadata Optional name-value pairs associated with the file as metadata.
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
     * @throws ShareStorageException If the directory already exists, the parent directory does not exist or
     * directory is an invalid resource name.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
     */
    public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
        FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
        Context context) {
        Mono<Response<ShareFileInfo>> response = shareFileAsyncClient
            .createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Copies a blob or file to a destination file within the storage account.
     *
     * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
     * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to
     * the naming rules.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
     * second is used.
     * @return A {@link SyncPoller} to poll the progress of the copy operation.
     */
    public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata,
        Duration pollInterval) {
        return shareFileAsyncClient.beginCopy(sourceUrl, metadata, pollInterval)
            .getSyncPoller();
    }

    /**
     * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
     *
     * @param copyId Specifies the copy id which has copying pending status associated with it.
     */
    public void abortCopy(String copyId) {
        abortCopyWithResponse(copyId, null, Context.NONE);
    }

    /**
     * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
     *
     * @param copyId Specifies the copy id which has copying pending status associated with it.
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the status of aborting copy of the file.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
     */
    public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) {
        Mono<Response<Void>> response = shareFileAsyncClient.abortCopyWithResponse(copyId, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Downloads a file from the system, including its metadata and properties, into a file specified by the path.
     *
     * <p>The file will be created and must not exist; if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * @param downloadFilePath The path where the downloaded file is stored.
     * @return The properties of the file.
     */
    public ShareFileProperties downloadToFile(String downloadFilePath) {
        return downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue();
    }

    /**
     * Downloads a file from the system, including its metadata and properties, into a file specified by the path.
     *
     * <p>The file will be created and must not exist; if the file already exists a
     * {@link FileAlreadyExistsException} will be thrown.</p>
     *
     * @param downloadFilePath The path where the downloaded file is stored.
     * @param range Optional byte range which returns file data only from the specified range.
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return The response of the file properties.
     */
    public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range,
        Duration timeout, Context context) {
        Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.downloadToFileWithResponse(
            downloadFilePath, range, context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Downloads a file from the system, including its metadata and properties.
     *
     * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
     * @throws NullPointerException If {@code stream} is {@code null}.
     */
    public void download(OutputStream stream) {
        downloadWithResponse(stream, null, null, null, Context.NONE);
    }

    /**
     * Downloads a file from the system, including its metadata and properties.
     *
     * @param stream A non-null {@link OutputStream} where the downloaded data will be written.
     * @param range Optional byte range which returns file data only from the specified range.
     * @param rangeGetContentMD5 Optional boolean; when set to true the service returns the MD5 hash for the range,
     * as long as the range is less than or equal to 4 MB in size.
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the headers and response status code.
     * @throws NullPointerException If {@code stream} is {@code null}.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
     */
    public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range,
        Boolean rangeGetContentMD5, Duration timeout, Context context) {
        Objects.requireNonNull(stream, "'stream' cannot be null.");

        // Reduce the response's ByteBuffer flux into the caller's stream; IOExceptions are surfaced as
        // UncheckedIOException through the reactive pipeline.
        Mono<ShareFileDownloadResponse> download = shareFileAsyncClient.downloadWithResponse(range,
            rangeGetContentMD5, context)
            .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> {
                try {
                    outputStream.write(FluxUtil.byteBufferToArray(buffer));
                    return outputStream;
                } catch (IOException ex) {
                    throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex)));
                }
            }).thenReturn(new ShareFileDownloadResponse(response)));

        return StorageImplUtils.blockWithOptionalTimeout(download, timeout);
    }

    /**
     * Deletes the file associated with the client.
     *
     * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
     */
    public void delete() {
        deleteWithResponse(null, Context.NONE);
    }

    /**
     * Deletes the file associated with the client.
     *
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response that only contains headers and response status code.
     * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
     */
    public Response<Void> deleteWithResponse(Duration timeout, Context context) {
        Mono<Response<Void>> response = shareFileAsyncClient.deleteWithResponse(context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
     * date, is server encrypted, and eTag.
     *
     * @return {@link ShareFileProperties Storage file properties}.
     */
    public ShareFileProperties getProperties() {
        return getPropertiesWithResponse(null, Context.NONE).getValue();
    }

    /**
     * Retrieves the properties of the storage account's file. The properties include file metadata, last modified
     * date, is server encrypted, and eTag.
     *
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and
     * status code.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
     */
    public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) {
        Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.getPropertiesWithResponse(context);
        return StorageImplUtils.blockWithOptionalTimeout(response, timeout);
    }

    /**
     * Sets the user-defined httpHeaders to associate to the file.
     *
     * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p>
     *
     * @param newFileSize New file size of the file.
     * @param httpHeaders The user settable file http headers.
     * @param smbProperties The user settable file smb properties.
     * @param filePermission The file permission of the file.
     * @return The {@link ShareFileInfo file info}.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     */
    public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders,
        FileSmbProperties smbProperties, String filePermission) {
        return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE)
            .getValue();
    }

    /**
     * Sets the user-defined httpHeaders to associate to the file.
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Response containing the {@link ShareFileInfo file info} with headers and status code * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) { Mono<Response<ShareFileInfo>> response = shareFileAsyncClient .setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return The {@link ShareFileMetadataInfo file meta info} * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys */ public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata, null, Context.NONE).getValue(); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout, Context context) { Mono<Response<ShareFileMetadataInfo>> response = shareFileAsyncClient .setMetadataWithResponse(metadata, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the * ShareFileRangeWriteType is set to clear, the value of this header must be set to zero. * @return The {@link ShareFileUploadInfo file upload info} * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public ShareFileUploadInfo upload(InputStream data, long length) { return uploadWithResponse(data, length, 0L, null, Context.NONE).getValue(); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024. 
</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset, Duration timeout, Context context) { return StorageImplUtils.blockWithOptionalTimeout(shareFileAsyncClient.uploadWithResponse(Utility .convertStreamToByteBuffer(data, length, (int) ShareFileAsyncClient.FILE_DEFAULT_BLOCK_SIZE), length, offset, context), timeout); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. 
* @param sourceUrl Specifies the URL of the source file. * @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} */ public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset, String sourceUrl) { return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE) .getValue(); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. * @param sourceUrl Specifies the URL of the source file. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with * headers and response status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset, long sourceOffset, String sourceUrl, Duration timeout, Context context) { Mono<Response<ShareFileUploadRangeFromUrlInfo>> response = shareFileAsyncClient.uploadRangeFromUrlWithResponse( length, destinationOffset, sourceOffset, sourceUrl, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Clears a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link ShareFileUploadInfo file upload info} */ public ShareFileUploadInfo clearRange(long length) { return clearRangeWithResponse(length, 0, null, Context.NONE).getValue(); } /** * Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout, Context context) { Mono<Response<ShareFileUploadInfo>> response = shareFileAsyncClient .clearRangeWithResponse(length, offset, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p>Upload the file from the source file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadFromFile * * <p>For more information, see the * <a href="https: * and * <a href="https: * * @param uploadFilePath The path where store the source file to upload */ public void uploadFromFile(String uploadFilePath) { shareFileAsyncClient.uploadFromFile(uploadFilePath).block(); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileRange ranges} in the files. */ public PagedIterable<ShareFileRange> listRanges() { return listRanges(null, null, null); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges within the file range from 1KB to 2KB.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link ShareFileRange ranges} in the files that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listRangesWithOptionalTimeout(range, timeout, context)); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List all handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles} * * <p>For more information, see the * <a href="https: * * @return {@link HandleItem handles} in the files that satisfy the requirements */ public PagedIterable<HandleItem> listHandles() { return listHandles(null, null, Context.NONE); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List 10 handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles * * <p>For more information, see the * <a href="https: * * @param maxResultsPerPage Optional max number of results returned per page * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link HandleItem handles} in the file that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listHandlesWithOptionalTimeout(maxResultsPerPage, timeout, context)); } /** * Closes a handle on the file at the service. 
This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandle * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @return Information about the closed handles. */ public CloseHandlesInfo forceCloseHandle(String handleId) { return forceCloseHandleWithResponse(handleId, null, Context.NONE).getValue(); } /** * Closes a handle on the file at the service. This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response that contains information about the closed handles, headers and response status code. */ public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) { Mono<Response<CloseHandlesInfo>> response = shareFileAsyncClient .forceCloseHandleWithResponse(handleId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Closes all handles opened on the file at the service. * * <p><strong>Code Samples</strong></p> * * <p>Force close all handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. 
If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Information about the closed handles */ /** * Get snapshot id which attached to {@link ShareFileClient}. Return {@code null} if no snapshot id attached. * * <p><strong>Code Samples</strong></p> * * <p>Get the share snapshot id. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareSnapshotId} * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. */ public String getShareSnapshotId() { return shareFileAsyncClient.getShareSnapshotId(); } /** * Get the share name of file client. * * <p>Get the share name. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareName} * * @return The share name of the file. */ public String getShareName() { return this.shareFileAsyncClient.getShareName(); } /** * Get file path of the client. * * <p>Get the file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getFilePath} * * @return The path of the file. */ public String getFilePath() { return this.shareFileAsyncClient.getFilePath(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return this.shareFileAsyncClient.getAccountName(); } }
class ShareFileClient {
    private final ClientLogger logger = new ClientLogger(ShareFileClient.class);
    private final ShareFileAsyncClient shareFileAsyncClient;

    /**
     * Creates a ShareFileClient that wraps a ShareFileAsyncClient and requests.
     *
     * @param shareFileAsyncClient ShareFileAsyncClient that is used to send requests
     */
    ShareFileClient(ShareFileAsyncClient shareFileAsyncClient) {
        this.shareFileAsyncClient = shareFileAsyncClient;
    }

    /**
     * Get the url of the storage file client.
     *
     * @return the URL of the storage file client.
     */
    public String getFileUrl() {
        return this.shareFileAsyncClient.getFileUrl();
    }

    /**
     * Gets the service version the client is using.
     *
     * @return the service version the client is using.
     */
    public ShareServiceVersion getServiceVersion() {
        return this.shareFileAsyncClient.getServiceVersion();
    }

    /**
     * Opens a file input stream to download the whole file.
     *
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileInputStream openInputStream() {
        // Whole-file read is expressed as a range starting at offset 0.
        return openInputStream(new ShareFileRange(0));
    }

    /**
     * Opens a file input stream to download the specified range of the file.
     *
     * @param range {@link ShareFileRange}
     * @return An <code>InputStream</code> object that represents the stream to use for reading from the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileInputStream openInputStream(ShareFileRange range) {
        return new StorageFileInputStream(this.shareFileAsyncClient, range.getStart(), range.getEnd());
    }

    /**
     * Creates and opens an output stream to write data to the file. If the file already exists on the service, it
     * will be overwritten.
     *
     * @return A {@link StorageFileOutputStream} object used to write data to the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileOutputStream getFileOutputStream() {
        return getFileOutputStream(0);
    }

    /**
     * Creates and opens an output stream to write data to the file at the given offset. If the file already exists
     * on the service, it will be overwritten.
     *
     * @param offset Starting point of the upload range, if {@code null} it will start from the beginning.
     * @return A {@link StorageFileOutputStream} object used to write data to the file.
     * @throws ShareStorageException If a storage service error occurred.
     */
    public final StorageFileOutputStream getFileOutputStream(long offset) {
        return new StorageFileOutputStream(this.shareFileAsyncClient, offset);
    }

    /**
     * Creates a file in the storage account and returns a {@link ShareFileInfo} to interact with it.
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @return The {@link ShareFileInfo file info}.
     * @throws ShareStorageException If the file has already existed, the parent directory does not exist or
     * fileName is an invalid resource name.
     */
    public ShareFileInfo create(long maxSize) {
        Response<ShareFileInfo> response =
            createWithResponse(maxSize, null, null, null, null, null, Context.NONE);
        return response.getValue();
    }

    /**
     * Creates a file in the storage account and returns a response of ShareFileInfo to interact with it.
     *
     * @param maxSize The maximum size in bytes for the file, up to 1 TiB.
     * @param httpHeaders The user settable file http headers.
     * @param smbProperties The user settable file smb properties.
     * @param filePermission The file permission of the file.
     * @param metadata Optional name-value pairs associated with the file as metadata.
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the {@link ShareFileInfo file info} and the status of creating the file.
     * @throws ShareStorageException If the directory has already existed, the parent directory does not exist or
     * directory is an invalid resource name.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
     */
    public Response<ShareFileInfo> createWithResponse(long maxSize, ShareFileHttpHeaders httpHeaders,
        FileSmbProperties smbProperties, String filePermission, Map<String, String> metadata, Duration timeout,
        Context context) {
        return StorageImplUtils.blockWithOptionalTimeout(
            shareFileAsyncClient.createWithResponse(maxSize, httpHeaders, smbProperties, filePermission, metadata,
                context), timeout);
    }

    /**
     * Copies a blob or file to a destination file within the storage account.
     *
     * @param sourceUrl Specifies the URL of the source file or blob, up to 2 KB in length.
     * @param metadata Optional name-value pairs associated with the file as metadata. Metadata names must adhere to
     * the naming rules.
     * @param pollInterval Duration between each poll for the copy status. If none is specified, a default of one
     * second is used.
     * @return A {@link SyncPoller} to poll the progress of copy operation.
     */
    public SyncPoller<ShareFileCopyInfo, Void> beginCopy(String sourceUrl, Map<String, String> metadata,
        Duration pollInterval) {
        return shareFileAsyncClient.beginCopy(sourceUrl, metadata, pollInterval).getSyncPoller();
    }

    /**
     * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
     *
     * @param copyId Specifies the copy id which has copying pending status associate with it.
     */
    public void abortCopy(String copyId) {
        abortCopyWithResponse(copyId, null, Context.NONE);
    }

    /**
     * Aborts a pending Copy File operation, and leaves a destination file with zero length and full metadata.
     *
     * @param copyId Specifies the copy id which has copying pending status associate with it.
     * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout
     * concludes a {@link RuntimeException} will be thrown.
     * @param context Additional context that is passed through the Http pipeline during the service call.
     * @return A response containing the status of aborting copy the file.
     * @throws RuntimeException if the operation doesn't complete before the timeout concludes.
*/ public Response<Void> abortCopyWithResponse(String copyId, Duration timeout, Context context) { Mono<Response<Void>> response = shareFileAsyncClient.abortCopyWithResponse(copyId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file to current folder. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFile * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @return The properties of the file. */ public ShareFileProperties downloadToFile(String downloadFilePath) { return downloadToFileWithResponse(downloadFilePath, null, null, Context.NONE).getValue(); } /** * Downloads a file from the system, including its metadata and properties into a file specified by the path. * * <p>The file will be created and must not exist, if the file already exists a {@link FileAlreadyExistsException} * will be thrown.</p> * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes to current folder. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadToFileWithResponse * * <p>For more information, see the * <a href="https: * * @param downloadFilePath The path where store the downloaded file * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return The response of the file properties. 
*/ public Response<ShareFileProperties> downloadToFileWithResponse(String downloadFilePath, ShareFileRange range, Duration timeout, Context context) { Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.downloadToFileWithResponse(downloadFilePath, range, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file with its metadata and properties. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.download * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} where the downloaded data will be written. * @throws NullPointerException If {@code stream} is {@code null}. */ public void download(OutputStream stream) { downloadWithResponse(stream, null, null, null, Context.NONE); } /** * Downloads a file from the system, including its metadata and properties * * <p><strong>Code Samples</strong></p> * * <p>Download the file from 1024 to 2048 bytes with its metadata and properties and without the contentMD5. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.downloadWithResponse * * <p>For more information, see the * <a href="https: * * @param stream A non-null {@link OutputStream} where the downloaded data will be written. * @param range Optional byte range which returns file data only from the specified range. * @param rangeGetContentMD5 Optional boolean which the service returns the MD5 hash for the range when it sets to * true, as long as the range is less than or equal to 4 MB in size. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the headers and response status code * @throws NullPointerException If {@code stream} is {@code null}. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public ShareFileDownloadResponse downloadWithResponse(OutputStream stream, ShareFileRange range, Boolean rangeGetContentMD5, Duration timeout, Context context) { Objects.requireNonNull(stream, "'stream' cannot be null."); Mono<ShareFileDownloadResponse> download = shareFileAsyncClient.downloadWithResponse(range, rangeGetContentMD5, context) .flatMap(response -> response.getValue().reduce(stream, (outputStream, buffer) -> { try { outputStream.write(FluxUtil.byteBufferToArray(buffer)); return outputStream; } catch (IOException ex) { throw logger.logExceptionAsError(Exceptions.propagate(new UncheckedIOException(ex))); } }).thenReturn(new ShareFileDownloadResponse(response))); return StorageImplUtils.blockWithOptionalTimeout(download, timeout); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.delete} * * <p>For more information, see the * <a href="https: * * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist. */ public void delete() { deleteWithResponse(null, Context.NONE); } /** * Deletes the file associate with the client. * * <p><strong>Code Samples</strong></p> * * <p>Delete the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.deleteWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response that only contains headers and response status code * @throws ShareStorageException If the directory doesn't exist or the file doesn't exist. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<Void> deleteWithResponse(Duration timeout, Context context) { Mono<Response<Void>> response = shareFileAsyncClient.deleteWithResponse(context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getProperties} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileProperties Storage file properties} */ public ShareFileProperties getProperties() { return getPropertiesWithResponse(null, Context.NONE).getValue(); } /** * Retrieves the properties of the storage account's file. The properties includes file metadata, last modified * date, is server encrypted, and eTag. * * <p><strong>Code Samples</strong></p> * * <p>Retrieve file properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileProperties Storage file properties} with headers and * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileProperties> getPropertiesWithResponse(Duration timeout, Context context) { Mono<Response<ShareFileProperties>> response = shareFileAsyncClient.getPropertiesWithResponse(context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined httpHeaders to associate to the file. * * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setProperties * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @return The {@link ShareFileInfo file info} * @throws IllegalArgumentException thrown if parameters fail the validation. */ public ShareFileInfo setProperties(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission) { return setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, null, Context.NONE) .getValue(); } /** * Sets the user-defined httpHeaders to associate to the file. 
* * <p>If {@code null} is passed for the httpHeaders it will clear the httpHeaders associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the httpHeaders of contentType of "text/plain"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>Clear the httpHeaders of the file and preserve the SMB properties</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setPropertiesWithResponse * * <p>For more information, see the * <a href="https: * * @param newFileSize New file size of the file * @param httpHeaders The user settable file http headers. * @param smbProperties The user settable file smb properties. * @param filePermission The file permission of the file * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Response containing the {@link ShareFileInfo file info} with headers and status code * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileInfo> setPropertiesWithResponse(long newFileSize, ShareFileHttpHeaders httpHeaders, FileSmbProperties smbProperties, String filePermission, Duration timeout, Context context) { Mono<Response<ShareFileInfo>> response = shareFileAsyncClient .setPropertiesWithResponse(newFileSize, httpHeaders, smbProperties, filePermission, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Sets the user-defined metadata to associate to the file. 
* * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadata * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @return The {@link ShareFileMetadataInfo file meta info} * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys */ public ShareFileMetadataInfo setMetadata(Map<String, String> metadata) { return setMetadataWithResponse(metadata, null, Context.NONE).getValue(); } /** * Sets the user-defined metadata to associate to the file. * * <p>If {@code null} is passed for the metadata it will clear the metadata associated to the file.</p> * * <p><strong>Code Samples</strong></p> * * <p>Set the metadata to "file:updatedMetadata"</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>Clear the metadata of the file</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.setMetadataWithResponse * * <p>For more information, see the * <a href="https: * * @param metadata Options.Metadata to set on the file, if null is passed the metadata for the file is cleared * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return Response containing the {@link ShareFileMetadataInfo file meta info} with headers and status code * @throws ShareStorageException If the file doesn't exist or the metadata contains invalid keys * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileMetadataInfo> setMetadataWithResponse(Map<String, String> metadata, Duration timeout, Context context) { Mono<Response<ShareFileMetadataInfo>> response = shareFileAsyncClient .setMetadataWithResponse(metadata, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads a range of bytes to the beginning of a file in storage file service. Upload operations performs an * in-place write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" to the file in Storage File Service. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.upload * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. When the * ShareFileRangeWriteType is set to clear, the value of this header must be set to zero. * @return The {@link ShareFileUploadInfo file upload info} * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) */ public ShareFileUploadInfo upload(InputStream data, long length) { return uploadWithResponse(data, length, 0L, null, Context.NONE).getValue(); } /** * Uploads a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Upload data "default" starting from 1024. 
</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadWithResponse * * <p>For more information, see the * <a href="https: * * @param data The data which will upload to the storage file. * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws ShareStorageException If you attempt to upload a range that is larger than 4 MB, the service returns * status code 413 (Request Entity Too Large) * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> uploadWithResponse(InputStream data, long length, Long offset, Duration timeout, Context context) { return StorageImplUtils.blockWithOptionalTimeout(shareFileAsyncClient.uploadWithResponse(Utility .convertStreamToByteBuffer(data, length, (int) ShareFileAsyncClient.FILE_DEFAULT_BLOCK_SIZE), length, offset, context), timeout); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrl * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. 
* @param sourceUrl Specifies the URL of the source file. * @return The {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} */ public ShareFileUploadRangeFromUrlInfo uploadRangeFromUrl(long length, long destinationOffset, long sourceOffset, String sourceUrl) { return uploadRangeFromUrlWithResponse(length, destinationOffset, sourceOffset, sourceUrl, null, Context.NONE) .getValue(); } /** * Uploads a range of bytes from one file to another file. * * <p><strong>Code Samples</strong></p> * * <p>Upload a number of bytes from a file at defined source and destination offsets </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadRangeFromUrlWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param destinationOffset Starting point of the upload range on the destination. * @param sourceOffset Starting point of the upload range on the source. * @param sourceUrl Specifies the URL of the source file. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response containing the {@link ShareFileUploadRangeFromUrlInfo file upload range from url info} with * headers and response status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. 
*/ public Response<ShareFileUploadRangeFromUrlInfo> uploadRangeFromUrlWithResponse(long length, long destinationOffset, long sourceOffset, String sourceUrl, Duration timeout, Context context) { Mono<Response<ShareFileUploadRangeFromUrlInfo>> response = shareFileAsyncClient.uploadRangeFromUrlWithResponse( length, destinationOffset, sourceOffset, sourceUrl, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Clears a range of bytes to specific of a file in storage file service. Clear operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clears the first 1024 bytes. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRange * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being cleared. * @return The {@link ShareFileUploadInfo file upload info} */ public ShareFileUploadInfo clearRange(long length) { return clearRangeWithResponse(length, 0, null, Context.NONE).getValue(); } /** * Clears a range of bytes to specific of a file in storage file service. Upload operations performs an in-place * write on the specified file. * * <p><strong>Code Samples</strong></p> * * <p>Clear the range starting from 1024 with length of 1024. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.clearRangeWithResponse * * <p>For more information, see the * <a href="https: * * @param length Specifies the number of bytes being transmitted in the request body. * @param offset Starting point of the upload range, if {@code null} it will start from the beginning. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. 
* @return A response containing the {@link ShareFileUploadInfo file upload info} with headers and response * status code. * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public Response<ShareFileUploadInfo> clearRangeWithResponse(long length, long offset, Duration timeout, Context context) { Mono<Response<ShareFileUploadInfo>> response = shareFileAsyncClient .clearRangeWithResponse(length, offset, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Uploads file to storage file service. * * <p><strong>Code Samples</strong></p> * * <p>Upload the file from the source file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.uploadFromFile * * <p>For more information, see the * <a href="https: * and * <a href="https: * * @param uploadFilePath The path where store the source file to upload */ public void uploadFromFile(String uploadFilePath) { shareFileAsyncClient.uploadFromFile(uploadFilePath).block(); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges} * * <p>For more information, see the * <a href="https: * * @return {@link ShareFileRange ranges} in the files. */ public PagedIterable<ShareFileRange> listRanges() { return listRanges(null, null, null); } /** * List of valid ranges for a file. * * <p><strong>Code Samples</strong></p> * * <p>List all ranges within the file range from 1KB to 2KB.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listRanges * * <p>For more information, see the * <a href="https: * * @param range Optional byte range which returns file data only from the specified range. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. 
* @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link ShareFileRange ranges} in the files that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<ShareFileRange> listRanges(ShareFileRange range, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listRangesWithOptionalTimeout(range, timeout, context)); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List all handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles} * * <p>For more information, see the * <a href="https: * * @return {@link HandleItem handles} in the files that satisfy the requirements */ public PagedIterable<HandleItem> listHandles() { return listHandles(null, null, Context.NONE); } /** * List of open handles on a file. * * <p><strong>Code Samples</strong></p> * * <p>List 10 handles for the file client.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.listHandles * * <p>For more information, see the * <a href="https: * * @param maxResultsPerPage Optional max number of results returned per page * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return {@link HandleItem handles} in the file that satisfy the requirements * @throws RuntimeException if the operation doesn't complete before the timeout concludes. */ public PagedIterable<HandleItem> listHandles(Integer maxResultsPerPage, Duration timeout, Context context) { return new PagedIterable<>(shareFileAsyncClient.listHandlesWithOptionalTimeout(maxResultsPerPage, timeout, context)); } /** * Closes a handle on the file at the service. 
This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandle * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @return Information about the closed handles. */ public CloseHandlesInfo forceCloseHandle(String handleId) { return forceCloseHandleWithResponse(handleId, null, Context.NONE).getValue(); } /** * Closes a handle on the file at the service. This is intended to be used alongside {@link * * <p><strong>Code Samples</strong></p> * * <p>Force close handles returned by list handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseHandleWithResponse * * <p>For more information, see the * <a href="https: * * @param handleId Handle ID to be closed. * @param timeout An optional timeout applied to the operation. If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return A response that contains information about the closed handles, headers and response status code. */ public Response<CloseHandlesInfo> forceCloseHandleWithResponse(String handleId, Duration timeout, Context context) { Mono<Response<CloseHandlesInfo>> response = shareFileAsyncClient .forceCloseHandleWithResponse(handleId, context); return StorageImplUtils.blockWithOptionalTimeout(response, timeout); } /** * Closes all handles opened on the file at the service. * * <p><strong>Code Samples</strong></p> * * <p>Force close all handles.</p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.forceCloseAllHandles * * <p>For more information, see the * <a href="https: * * @param timeout An optional timeout applied to the operation. 
If a response is not returned before the timeout * concludes a {@link RuntimeException} will be thrown. * @param context Additional context that is passed through the Http pipeline during the service call. * @return Information about the closed handles */ /** * Get snapshot id which attached to {@link ShareFileClient}. Return {@code null} if no snapshot id attached. * * <p><strong>Code Samples</strong></p> * * <p>Get the share snapshot id. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareSnapshotId} * * @return The snapshot id which is a unique {@code DateTime} value that identifies the share snapshot to its base * share. */ public String getShareSnapshotId() { return shareFileAsyncClient.getShareSnapshotId(); } /** * Get the share name of file client. * * <p>Get the share name. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getShareName} * * @return The share name of the file. */ public String getShareName() { return this.shareFileAsyncClient.getShareName(); } /** * Get file path of the client. * * <p>Get the file path. </p> * * {@codesnippet com.azure.storage.file.share.ShareFileClient.getFilePath} * * @return The path of the file. */ public String getFilePath() { return this.shareFileAsyncClient.getFilePath(); } /** * Get associated account name. * * @return account name associated with this storage resource. */ public String getAccountName() { return this.shareFileAsyncClient.getAccountName(); } }
If the exception is not an instance of RuntimeException — i.e. it is a checked exception — the statement `throw Exceptions.propagate(ex)` will throw a ReactiveException wrapping the inner exception. We should make sure that behavior is what we want; otherwise we need to unwrap the exception wherever we depend on it. ``` public static RuntimeException propagate(Throwable t) { throwIfFatal(t); if (t instanceof RuntimeException) { return (RuntimeException) t; } return new ReactiveException(t); } ```
/**
 * Blocks on the given async item response, converting it to the synchronous
 * response type.
 *
 * <p>If the blocked pipeline fails with a (possibly reactor-wrapped)
 * {@link CosmosClientException}, that exception is unwrapped and rethrown so
 * callers can catch it by type. Any other failure is rethrown as-is.</p>
 *
 * @param itemMono the item mono
 * @return the cosmos sync item response
 * @throws CosmosClientException the cosmos client exception
 */
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException {
    try {
        return itemMono
                   .map(this::convertResponse)
                   .block();
    } catch (Exception ex) {
        final Throwable throwable = Exceptions.unwrap(ex);
        if (throwable instanceof CosmosClientException) {
            throw (CosmosClientException) throwable;
        } else {
            // Rethrow the original exception unchanged. The previous code used
            // `throw Exceptions.propagate(ex)`, which wraps any checked exception
            // in an opaque ReactiveException, hiding the real type from callers.
            // Precise rethrow is legal here: the try block can only throw
            // unchecked exceptions, so `ex` is effectively a RuntimeException.
            throw ex;
        }
    }
}
throw Exceptions.propagate(ex);
/**
 * Blocks on the given async item response, converting it to the synchronous
 * response type.
 *
 * <p>A (possibly reactor-wrapped) {@link CosmosClientException} is unwrapped and
 * rethrown so callers can catch it by its declared type; any other failure is
 * rethrown unchanged.</p>
 *
 * @param itemMono the item mono
 * @return the cosmos sync item response
 * @throws CosmosClientException the cosmos client exception
 */
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException {
    try {
        return itemMono
            .map(this::convertResponse)
            .block();
    } catch (Exception ex) {
        // Reactor may wrap the real cause; unwrap before inspecting its type.
        final Throwable throwable = Exceptions.unwrap(ex);
        if (throwable instanceof CosmosClientException) {
            throw (CosmosClientException) throwable;
        } else {
            // Precise rethrow: the try block can only raise unchecked exceptions,
            // so rethrowing `ex` directly is compatible with this method's
            // `throws CosmosClientException` clause.
            throw ex;
        }
    }
}
class CosmosContainer { private final ClientLogger logger = new ClientLogger(CosmosContainer.class); private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. 
* * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. 
* * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. 
* * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. 
* * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
class CosmosContainer { private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. 
* * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. * * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. 
* * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. * * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. 
* * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. * * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
Instead of `break`, can we return `value` here? I think it will make things clearer. We can add an `else` block at the end and just return `value` from the else condition.
/**
 * Unwraps a scalar {@link JsonNode} into the corresponding Java value
 * (Boolean, Integer, Long, Double or String); any other node is returned as-is.
 */
static Object getValue(JsonNode value) {
    // Non-scalar nodes (objects / arrays) are handed back untouched.
    if (!value.isValueNode()) {
        return value;
    }
    switch (value.getNodeType()) {
        case STRING:
            return value.asText();
        case BOOLEAN:
            return value.asBoolean();
        case NUMBER:
            if (value.isInt()) {
                return value.asInt();
            }
            if (value.isLong()) {
                return value.asLong();
            }
            if (value.isDouble()) {
                return value.asDouble();
            }
            // Other numeric representations keep their node form, matching the
            // original fall-through behaviour after the switch.
            return value;
        default:
            throw new IllegalStateException("Unexpected value: " + value.getNodeType());
    }
}
break;
/**
 * Converts a scalar {@link JsonNode} to its plain Java equivalent.
 * Objects, arrays, and numeric nodes that are not int/long/double are
 * returned unchanged as the node itself.
 */
static Object getValue(JsonNode value) {
    if (!value.isValueNode()) {
        // Containers are returned untouched.
        return value;
    }
    switch (value.getNodeType()) {
        case BOOLEAN:
            return value.asBoolean();
        case NUMBER:
            // Narrowest matching primitive wins; anything else stays a node.
            return value.isInt() ? (Object) value.asInt()
                 : value.isLong() ? (Object) value.asLong()
                 : value.isDouble() ? (Object) value.asDouble()
                 : value;
        case STRING:
            return value.asText();
        default:
            throw new IllegalStateException("Unexpected value: " + value.getNodeType());
    }
}
/**
 * Base class for resources that are serialized to / from JSON. State is held in a
 * Jackson {@link ObjectNode} property bag; typed accessors convert on read.
 * NOTE(review): not thread-safe — the property bag is mutated without synchronization.
 */
class JsonSerializable {
    private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper();
    private final static Logger logger = LoggerFactory.getLogger(JsonSerializable.class);
    transient ObjectNode propertyBag = null;
    // Optional per-instance mapper; falls back to the shared OBJECT_MAPPER when null.
    private ObjectMapper om;

    protected JsonSerializable() {
        this.propertyBag = OBJECT_MAPPER.createObjectNode();
    }

    /**
     * Constructor.
     *
     * @param jsonString the json string that represents the JsonSerializable.
     * @param objectMapper the custom object mapper
     */
    JsonSerializable(String jsonString, ObjectMapper objectMapper) {
        // NOTE(review): fromJson uses the default mapper here because om is assigned
        // after parsing — confirm this ordering is intentional.
        this.propertyBag = fromJson(jsonString);
        this.om = objectMapper;
    }

    /**
     * Constructor.
     *
     * @param jsonString the json string that represents the JsonSerializable.
     */
    protected JsonSerializable(String jsonString) {
        this.propertyBag = fromJson(jsonString);
    }

    /**
     * Constructor.
     *
     * @param objectNode the {@link ObjectNode} that represents the {@link JsonSerializable}
     */
    JsonSerializable(ObjectNode objectNode) {
        this.propertyBag = objectNode;
    }

    // Rejects classes Jackson cannot instantiate reflectively: anonymous, local,
    // and non-static member classes.
    private static void checkForValidPOJO(Class<?> c) {
        if (c.isAnonymousClass() || c.isLocalClass()) {
            throw new IllegalArgumentException(
                String.format("%s can't be an anonymous or local class.", c.getName()));
        }
        if (c.isMemberClass() && !Modifier.isStatic(c.getModifiers())) {
            throw new IllegalArgumentException(
                String.format("%s must be static if it's a member class.", c.getName()));
        }
    }

    /**
     * Unwraps a scalar {@link JsonNode} into the corresponding Java value; non-scalar
     * nodes (and numeric nodes that are not int/long/double) are returned as-is.
     * (Restored here: this helper is called by get() and getObjectByPath() below but
     * its definition was missing from the original snippet.)
     */
    static Object getValue(JsonNode value) {
        if (value.isValueNode()) {
            switch (value.getNodeType()) {
                case BOOLEAN:
                    return value.asBoolean();
                case NUMBER:
                    if (value.isInt()) {
                        return value.asInt();
                    } else if (value.isLong()) {
                        return value.asLong();
                    } else if (value.isDouble()) {
                        return value.asDouble();
                    } else {
                        return value;
                    }
                case STRING:
                    return value.asText();
                default:
                    throw new IllegalStateException("Unexpected value: " + value.getNodeType());
            }
        }
        return value;
    }

    private ObjectMapper getMapper() {
        if (this.om != null) {
            return this.om;
        }
        return OBJECT_MAPPER;
    }

    void setMapper(ObjectMapper om) {
        this.om = om;
    }

    @JsonIgnore
    protected Logger getLogger() {
        return logger;
    }

    // Hook for subclasses to flush their typed fields into the property bag
    // before serialization; intentionally a no-op here.
    void populatePropertyBag() {
    }

    /**
     * Returns the property bag (JSONObject) as a HashMap.
     *
     * @return the HashMap.
     */
    public Map<String, Object> getMap() {
        return getMapper().convertValue(this.propertyBag, HashMap.class);
    }

    /**
     * Checks whether a property exists.
     *
     * @param propertyName the property to look up.
     * @return true if the property exists.
     */
    public boolean has(String propertyName) {
        return this.propertyBag.has(propertyName);
    }

    /**
     * Removes a value by propertyName.
     *
     * @param propertyName the property to remove.
     */
    void remove(String propertyName) {
        this.propertyBag.remove(propertyName);
    }

    /**
     * Sets the value of a property.
     *
     * @param <T> the type of the object.
     * @param propertyName the property to set.
     * @param value the value of the property.
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    <T> void set(String propertyName, T value) {
        if (value == null) {
            this.propertyBag.putNull(propertyName);
        } else if (value instanceof Collection) {
            ArrayNode jsonArray = propertyBag.arrayNode();
            this.internalSetCollection(propertyName, (Collection) value, jsonArray);
            this.propertyBag.set(propertyName, jsonArray);
        } else if (value instanceof JsonNode) {
            this.propertyBag.set(propertyName, (JsonNode) value);
        } else if (value instanceof JsonSerializable) {
            // value is proven non-null by the first branch, so the former
            // "castedValue != null" guards were dead code and have been removed.
            JsonSerializable castedValue = (JsonSerializable) value;
            castedValue.populatePropertyBag();
            this.propertyBag.set(propertyName, castedValue.propertyBag);
        } else {
            this.propertyBag.set(propertyName, getMapper().valueToTree(value));
        }
    }

    @SuppressWarnings({"unchecked", "rawtypes"})
    private <T> void internalSetCollection(String propertyName, Collection<T> collection, ArrayNode targetArray) {
        for (T childValue : collection) {
            if (childValue == null) {
                targetArray.addNull();
            } else if (childValue instanceof Collection) {
                // Nested collections become nested JSON arrays.
                ArrayNode childArray = targetArray.addArray();
                this.internalSetCollection(propertyName, (Collection) childValue, childArray);
            } else if (childValue instanceof JsonNode) {
                targetArray.add((JsonNode) childValue);
            } else if (childValue instanceof JsonSerializable) {
                JsonSerializable castedValue = (JsonSerializable) childValue;
                castedValue.populatePropertyBag();
                targetArray.add(castedValue.propertyBag != null
                                    ? castedValue.propertyBag : this.getMapper().createObjectNode());
            } else {
                targetArray.add(this.getMapper().valueToTree(childValue));
            }
        }
    }

    /**
     * Gets a property value as Object.
     *
     * @param propertyName the property to get.
     * @return the value of the property, or null if absent / JSON-null.
     */
    public Object get(String propertyName) {
        if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            return getValue(this.propertyBag.get(propertyName));
        } else {
            return null;
        }
    }

    /**
     * Gets a string value.
     *
     * @param propertyName the property to get.
     * @return the string value, or null if absent / JSON-null.
     */
    public String getString(String propertyName) {
        if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            return this.propertyBag.get(propertyName).asText();
        } else {
            return null;
        }
    }

    /**
     * Gets a boolean value.
     *
     * @param propertyName the property to get.
     * @return the boolean value, or null if absent / JSON-null.
     */
    public Boolean getBoolean(String propertyName) {
        if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            return this.propertyBag.get(propertyName).asBoolean();
        } else {
            return null;
        }
    }

    /**
     * Gets an integer value.
     *
     * @param propertyName the property to get.
     * @return the integer value, or null if absent / JSON-null.
     */
    public Integer getInt(String propertyName) {
        if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            // Autoboxing replaces the explicit Integer.valueOf(...) wrapper.
            return this.propertyBag.get(propertyName).asInt();
        } else {
            return null;
        }
    }

    /**
     * Gets a long value.
     *
     * @param propertyName the property to get.
     * @return the long value, or null if absent / JSON-null.
     */
    public Long getLong(String propertyName) {
        if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            return this.propertyBag.get(propertyName).asLong();
        } else {
            return null;
        }
    }

    /**
     * Gets a double value.
     *
     * @param propertyName the property to get.
     * @return the double value, or null if absent / JSON-null.
     */
    public Double getDouble(String propertyName) {
        if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            // Autoboxing replaces the deprecated "new Double(...)" constructor.
            return this.propertyBag.get(propertyName).asDouble();
        } else {
            return null;
        }
    }

    /**
     * Gets an object value.
     *
     * @param <T> the type of the object.
     * @param propertyName the property to get.
     * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local)
     * and a static one.
     * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case
     * separated by underscore, before converting to required class.
     * @return the object value, or null if absent / JSON-null.
     * @throws IllegalStateException thrown if an error occurs
     */
    public <T> T getObject(String propertyName, Class<T> c, boolean... convertFromCamelCase) {
        if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            JsonNode jsonObj = propertyBag.get(propertyName);
            if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c)
                    || Boolean.class.isAssignableFrom(c) || Object.class == c) {
                // Scalars: unwrap and cast directly.
                return c.cast(getValue(jsonObj));
            } else if (Enum.class.isAssignableFrom(c)) {
                try {
                    String value = String.class.cast(getValue(jsonObj));
                    value = convertFromCamelCase.length > 0 && convertFromCamelCase[0]
                                ? Strings.fromCamelCaseToUpperCase(value) : value;
                    // Reflective Enum.valueOf(String) on the concrete enum class.
                    return c.cast(c.getMethod("valueOf", String.class).invoke(null, value));
                } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException
                        | NoSuchMethodException | SecurityException e) {
                    throw new IllegalStateException("Failed to create enum.", e);
                }
            } else if (JsonSerializable.class.isAssignableFrom(c)) {
                try {
                    // JsonSerializable subclasses are rebuilt from their JSON string form.
                    Constructor<T> constructor = c.getDeclaredConstructor(String.class);
                    if (Modifier.isPrivate(constructor.getModifiers())) {
                        constructor.setAccessible(true);
                    }
                    return constructor.newInstance(toJson(jsonObj));
                } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
                        | InvocationTargetException | NoSuchMethodException | SecurityException e) {
                    throw new IllegalStateException("Failed to instantiate class object.", e);
                }
            } else {
                JsonSerializable.checkForValidPOJO(c);
                try {
                    return this.getMapper().treeToValue(jsonObj, c);
                } catch (IOException e) {
                    throw new IllegalStateException("Failed to get POJO.", e);
                }
            }
        }
        return null;
    }

    /**
     * Gets an object List.
     *
     * @param <T> the type of the objects in the List.
     * @param propertyName the property to get
     * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local)
     * and a static one.
     * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case
     * separated by underscore, before converting to required class.
     * @return the object collection, or null if absent / JSON-null.
     * @throws IllegalStateException thrown if an error occurs
     */
    public <T> List<T> getList(String propertyName, Class<T> c, boolean... convertFromCamelCase) {
        if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            ArrayNode jsonArray = (ArrayNode) this.propertyBag.get(propertyName);
            ArrayList<T> result = new ArrayList<T>();

            // Classify the target type once, outside the element loop.
            boolean isBaseClass = false;
            boolean isEnumClass = false;
            boolean isJsonSerializable = false;
            if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c)
                    || Boolean.class.isAssignableFrom(c) || Object.class == c) {
                isBaseClass = true;
            } else if (Enum.class.isAssignableFrom(c)) {
                isEnumClass = true;
            } else if (JsonSerializable.class.isAssignableFrom(c)) {
                isJsonSerializable = true;
            } else {
                JsonSerializable.checkForValidPOJO(c);
            }

            for (JsonNode n : jsonArray) {
                if (isBaseClass) {
                    result.add(c.cast(getValue(n)));
                } else if (isEnumClass) {
                    try {
                        String value = String.class.cast(getValue(n));
                        value = convertFromCamelCase.length > 0 && convertFromCamelCase[0]
                                    ? Strings.fromCamelCaseToUpperCase(value) : value;
                        result.add(c.cast(c.getMethod("valueOf", String.class).invoke(null, value)));
                    } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException
                            | NoSuchMethodException | SecurityException e) {
                        throw new IllegalStateException("Failed to create enum.", e);
                    }
                } else if (isJsonSerializable) {
                    try {
                        Constructor<T> constructor = c.getDeclaredConstructor(String.class);
                        if (Modifier.isPrivate(constructor.getModifiers())) {
                            constructor.setAccessible(true);
                        }
                        result.add(constructor.newInstance(toJson(n)));
                    } catch (InstantiationException | IllegalAccessException | IllegalArgumentException
                            | InvocationTargetException | NoSuchMethodException | SecurityException e) {
                        throw new IllegalStateException("Failed to instantiate class object.", e);
                    }
                } else {
                    try {
                        result.add(this.getMapper().treeToValue(n, c));
                    } catch (IOException e) {
                        throw new IllegalStateException("Failed to get POJO.", e);
                    }
                }
            }
            return result;
        }
        return null;
    }

    /**
     * Gets an object collection.
     *
     * @param <T> the type of the objects in the collection.
     * @param propertyName the property to get
     * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local)
     * and a static one.
     * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case
     * separated by underscore, before converting to required class.
     * @return the object collection.
     */
    public <T> Collection<T> getCollection(String propertyName, Class<T> c, boolean... convertFromCamelCase) {
        return getList(propertyName, c, convertFromCamelCase);
    }

    /**
     * Gets a JSONObject.
     *
     * @param propertyName the property to get.
     * @return the JSONObject, or null if absent / JSON-null.
     */
    ObjectNode getObject(String propertyName) {
        if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            ObjectNode jsonObj = (ObjectNode) this.propertyBag.get(propertyName);
            return jsonObj;
        }
        return null;
    }

    /**
     * Gets a JSONObject collection.
     *
     * @param propertyName the property to get.
     * @return the JSONObject collection, or null if the property is absent / JSON-null.
     */
    Collection<ObjectNode> getCollection(String propertyName) {
        Collection<ObjectNode> result = null;
        if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) {
            result = new ArrayList<ObjectNode>();
            // NOTE(review): findValues searches the whole tree for this field name,
            // not just the top-level array — confirm this is the intended semantics.
            for (JsonNode n : this.propertyBag.findValues(propertyName)) {
                result.add((ObjectNode) n);
            }
        }
        return result;
    }

    /**
     * Gets the value of a property identified by an array of property names that forms the path.
     *
     * @param propertyNames that form the path to the property to get.
     * @return the value of the property, or null if any path segment is missing.
     */
    public Object getObjectByPath(List<String> propertyNames) {
        ObjectNode propBag = this.propertyBag;
        JsonNode value = null;
        String propertyName = null;
        // Plain int counter instead of a boxed Integer — behaviourally identical,
        // avoids needless boxing on every increment.
        int matchedProperties = 0;
        Iterator<String> iterator = propertyNames.iterator();
        if (iterator.hasNext()) {
            do {
                propertyName = iterator.next();
                if (propBag.has(propertyName)) {
                    matchedProperties++;
                    value = propBag.get(propertyName);
                    if (!value.isObject()) {
                        // Reached a leaf before exhausting the path.
                        break;
                    }
                    propBag = (ObjectNode) value;
                } else {
                    break;
                }
            } while (iterator.hasNext());

            // Only a full path match yields a value.
            if (value != null && matchedProperties == propertyNames.size()) {
                return getValue(value);
            }
        }
        return null;
    }

    private ObjectNode fromJson(String json) {
        try {
            return (ObjectNode) getMapper().readTree(json);
        } catch (IOException e) {
            throw new IllegalArgumentException(String.format("Unable to parse JSON %s", json), e);
        }
    }

    private String toJson(Object object) {
        try {
            return getMapper().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException("Unable to convert JSON to STRING", e);
        }
    }

    private String toPrettyJson(Object object) {
        try {
            return getMapper().writerWithDefaultPrettyPrinter().writeValueAsString(object);
        } catch (JsonProcessingException e) {
            throw new IllegalStateException("Unable to convert JSON to STRING", e);
        }
    }

    /**
     * Converts to an Object (only POJOs and JSONObject are supported).
     *
     * @param <T> the type of the object.
     * @param c the class of the object, either a POJO class or JSONObject. If c is a POJO class, it must be a member
     * (and not an anonymous or local) and a static one.
     * @return the POJO.
     * @throws IllegalArgumentException thrown if an error occurs
     */
    @SuppressWarnings("unchecked")
    public <T> T toObject(Class<T> c) {
        if (CosmosItemProperties.class.isAssignableFrom(c)) {
            return (T) new CosmosItemProperties(this.toJson());
        }
        if (JsonSerializable.class.isAssignableFrom(c) || String.class.isAssignableFrom(c)
                || Number.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c)) {
            return c.cast(this.get(Constants.Properties.VALUE));
        }
        if (List.class.isAssignableFrom(c)) {
            Object o = this.get(Constants.Properties.VALUE);
            try {
                return this.getMapper().readValue(o.toString(), c);
            } catch (IOException e) {
                throw new IllegalStateException("Failed to convert to collection.", e);
            }
        }
        if (ObjectNode.class.isAssignableFrom(c)) {
            if (ObjectNode.class != c) {
                throw new IllegalArgumentException("We support JSONObject but not its sub-classes.");
            }
            return c.cast(this.propertyBag);
        } else {
            JsonSerializable.checkForValidPOJO(c);
            try {
                return this.getMapper().readValue(this.toJson(), c);
            } catch (IOException e) {
                throw new IllegalStateException("Failed to get POJO.", e);
            }
        }
    }

    /**
     * Converts to a JSON string.
     *
     * @return the JSON string.
     */
    public String toJson() {
        return this.toJson(SerializationFormattingPolicy.NONE);
    }

    /**
     * Converts to a JSON string.
     *
     * @param formattingPolicy the formatting policy to be used.
     * @return the JSON string.
     */
    public String toJson(SerializationFormattingPolicy formattingPolicy) {
        this.populatePropertyBag();
        if (SerializationFormattingPolicy.INDENTED.equals(formattingPolicy)) {
            return toPrettyJson(propertyBag);
        } else {
            return toJson(propertyBag);
        }
    }

    /**
     * Gets a simple STRING representation of the property bag.
     * <p>
     * For proper conversion to JSON and inclusion of the default values,
     * use {@link #toJson()}.
     *
     * @return string representation of property bag.
     */
    public String toString() {
        return toJson(propertyBag);
    }
}
class JsonSerializable { private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); private final static Logger logger = LoggerFactory.getLogger(JsonSerializable.class); transient ObjectNode propertyBag = null; private ObjectMapper om; protected JsonSerializable() { this.propertyBag = OBJECT_MAPPER.createObjectNode(); } /** * Constructor. * * @param jsonString the json string that represents the JsonSerializable. * @param objectMapper the custom object mapper */ JsonSerializable(String jsonString, ObjectMapper objectMapper) { this.propertyBag = fromJson(jsonString); this.om = objectMapper; } /** * Constructor. * * @param jsonString the json string that represents the JsonSerializable. */ protected JsonSerializable(String jsonString) { this.propertyBag = fromJson(jsonString); } /** * Constructor. * * @param objectNode the {@link ObjectNode} that represent the {@link JsonSerializable} */ JsonSerializable(ObjectNode objectNode) { this.propertyBag = objectNode; } private static void checkForValidPOJO(Class<?> c) { if (c.isAnonymousClass() || c.isLocalClass()) { throw new IllegalArgumentException( String.format("%s can't be an anonymous or local class.", c.getName())); } if (c.isMemberClass() && !Modifier.isStatic(c.getModifiers())) { throw new IllegalArgumentException( String.format("%s must be static if it's a member class.", c.getName())); } } private ObjectMapper getMapper() { if (this.om != null) { return this.om; } return OBJECT_MAPPER; } void setMapper(ObjectMapper om) { this.om = om; } @JsonIgnore protected Logger getLogger() { return logger; } void populatePropertyBag() { } /** * Returns the propertybag(JSONObject) in a hashMap * * @return the HashMap. */ public Map<String, Object> getMap() { return getMapper().convertValue(this.propertyBag, HashMap.class); } /** * Checks whether a property exists. * * @param propertyName the property to look up. * @return true if the property exists. 
*/ public boolean has(String propertyName) { return this.propertyBag.has(propertyName); } /** * Removes a value by propertyName. * * @param propertyName the property to remove. */ void remove(String propertyName) { this.propertyBag.remove(propertyName); } /** * Sets the value of a property. * * @param <T> the type of the object. * @param propertyName the property to set. * @param value the value of the property. */ @SuppressWarnings({"unchecked", "rawtypes"}) <T> void set(String propertyName, T value) { if (value == null) { this.propertyBag.putNull(propertyName); } else if (value instanceof Collection) { ArrayNode jsonArray = propertyBag.arrayNode(); this.internalSetCollection(propertyName, (Collection) value, jsonArray); this.propertyBag.set(propertyName, jsonArray); } else if (value instanceof JsonNode) { this.propertyBag.set(propertyName, (JsonNode) value); } else if (value instanceof JsonSerializable) { JsonSerializable castedValue = (JsonSerializable) value; if (castedValue != null) { castedValue.populatePropertyBag(); } this.propertyBag.set(propertyName, castedValue != null ? castedValue.propertyBag : null); } else { this.propertyBag.set(propertyName, getMapper().valueToTree(value)); } } @SuppressWarnings({"unchecked", "rawtypes"}) private <T> void internalSetCollection(String propertyName, Collection<T> collection, ArrayNode targetArray) { for (T childValue : collection) { if (childValue == null) { targetArray.addNull(); } else if (childValue instanceof Collection) { ArrayNode childArray = targetArray.addArray(); this.internalSetCollection(propertyName, (Collection) childValue, childArray); } else if (childValue instanceof JsonNode) { targetArray.add((JsonNode) childValue); } else if (childValue instanceof JsonSerializable) { JsonSerializable castedValue = (JsonSerializable) childValue; castedValue.populatePropertyBag(); targetArray.add(castedValue.propertyBag != null ? 
castedValue.propertyBag : this.getMapper().createObjectNode()); } else { targetArray.add(this.getMapper().valueToTree(childValue)); } } } /** * Gets a property value as Object. * * @param propertyName the property to get. * @return the value of the property. */ public Object get(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return getValue(this.propertyBag.get(propertyName)); } else { return null; } } /** * Gets a string value. * * @param propertyName the property to get. * @return the string value. */ public String getString(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return this.propertyBag.get(propertyName).asText(); } else { return null; } } /** * Gets a boolean value. * * @param propertyName the property to get. * @return the boolean value. */ public Boolean getBoolean(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return this.propertyBag.get(propertyName).asBoolean(); } else { return null; } } /** * Gets an integer value. * * @param propertyName the property to get. * @return the boolean value */ public Integer getInt(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return Integer.valueOf(this.propertyBag.get(propertyName).asInt()); } else { return null; } } /** * Gets a long value. * * @param propertyName the property to get. * @return the long value */ public Long getLong(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return Long.valueOf(this.propertyBag.get(propertyName).asLong()); } else { return null; } } /** * Gets a double value. * * @param propertyName the property to get. * @return the double value. 
*/ public Double getDouble(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return new Double(this.propertyBag.get(propertyName).asDouble()); } else { return null; } } /** * Gets an object value. * * @param <T> the type of the object. * @param propertyName the property to get. * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object value. * @throws IllegalStateException thrown if an error occurs */ public <T> T getObject(String propertyName, Class<T> c, boolean... convertFromCamelCase) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { JsonNode jsonObj = propertyBag.get(propertyName); if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c) || Object.class == c) { return c.cast(getValue(jsonObj)); } else if (Enum.class.isAssignableFrom(c)) { try { String value = String.class.cast(getValue(jsonObj)); value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? 
Strings.fromCamelCaseToUpperCase(value) : value; return c.cast(c.getMethod("valueOf", String.class).invoke(null, value)); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException("Failed to create enum.", e); } } else if (JsonSerializable.class.isAssignableFrom(c)) { try { Constructor<T> constructor = c.getDeclaredConstructor(String.class); if (Modifier.isPrivate(constructor.getModifiers())) { constructor.setAccessible(true); } return constructor.newInstance(toJson(jsonObj)); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException( "Failed to instantiate class object.", e); } } else { JsonSerializable.checkForValidPOJO(c); try { return this.getMapper().treeToValue(jsonObj, c); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } return null; } /** * Gets an object List. * * @param <T> the type of the objects in the List. * @param propertyName the property to get * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object collection. * @throws IllegalStateException thrown if an error occurs */ public <T> List<T> getList(String propertyName, Class<T> c, boolean... 
convertFromCamelCase) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { ArrayNode jsonArray = (ArrayNode) this.propertyBag.get(propertyName); ArrayList<T> result = new ArrayList<T>(); boolean isBaseClass = false; boolean isEnumClass = false; boolean isJsonSerializable = false; if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c) || Object.class == c) { isBaseClass = true; } else if (Enum.class.isAssignableFrom(c)) { isEnumClass = true; } else if (JsonSerializable.class.isAssignableFrom(c)) { isJsonSerializable = true; } else { JsonSerializable.checkForValidPOJO(c); } for (JsonNode n : jsonArray) { if (isBaseClass) { result.add(c.cast(getValue(n))); } else if (isEnumClass) { try { String value = String.class.cast(getValue(n)); value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? Strings.fromCamelCaseToUpperCase(value) : value; result.add(c.cast(c.getMethod("valueOf", String.class).invoke(null, value))); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException("Failed to create enum.", e); } } else if (isJsonSerializable) { try { Constructor<T> constructor = c.getDeclaredConstructor(String.class); if (Modifier.isPrivate(constructor.getModifiers())) { constructor.setAccessible(true); } result.add(constructor.newInstance(toJson(n))); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException( "Failed to instantiate class object.", e); } } else { try { result.add(this.getMapper().treeToValue(n, c)); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } return result; } return null; } /** * Gets an object collection. * * @param <T> the type of the objects in the collection. 
* @param propertyName the property to get * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object collection. */ public <T> Collection<T> getCollection(String propertyName, Class<T> c, boolean... convertFromCamelCase) { return getList(propertyName, c, convertFromCamelCase); } /** * Gets a JSONObject. * * @param propertyName the property to get. * @return the JSONObject. */ ObjectNode getObject(String propertyName) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { ObjectNode jsonObj = (ObjectNode) this.propertyBag.get(propertyName); return jsonObj; } return null; } /** * Gets a JSONObject collection. * * @param propertyName the property to get. * @return the JSONObject collection. */ Collection<ObjectNode> getCollection(String propertyName) { Collection<ObjectNode> result = null; if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { result = new ArrayList<ObjectNode>(); for (JsonNode n : this.propertyBag.findValues(propertyName)) { result.add((ObjectNode) n); } } return result; } /** * Gets the value of a property identified by an array of property names that forms the path. * * @param propertyNames that form the path to the property to get. * @return the value of the property. 
*/ public Object getObjectByPath(List<String> propertyNames) { ObjectNode propBag = this.propertyBag; JsonNode value = null; String propertyName = null; Integer matchedProperties = 0; Iterator<String> iterator = propertyNames.iterator(); if (iterator.hasNext()) { do { propertyName = iterator.next(); if (propBag.has(propertyName)) { matchedProperties++; value = propBag.get(propertyName); if (!value.isObject()) { break; } propBag = (ObjectNode) value; } else { break; } } while (iterator.hasNext()); if (value != null && matchedProperties == propertyNames.size()) { return getValue(value); } } return null; } private ObjectNode fromJson(String json) { try { return (ObjectNode) getMapper().readTree(json); } catch (IOException e) { throw new IllegalArgumentException( String.format("Unable to parse JSON %s", json), e); } } private String toJson(Object object) { try { return getMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException("Unable to convert JSON to STRING", e); } } private String toPrettyJson(Object object) { try { return getMapper().writerWithDefaultPrettyPrinter().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException("Unable to convert JSON to STRING", e); } } /** * Converts to an Object (only POJOs and JSONObject are supported). * * @param <T> the type of the object. * @param c the class of the object, either a POJO class or JSONObject. If c is a POJO class, it must be a member * (and not an anonymous or local) and a static one. * @return the POJO. 
* @throws IllegalArgumentException thrown if an error occurs */ public <T> T toObject(Class<T> c) { if (CosmosItemProperties.class.isAssignableFrom(c)) { return (T) new CosmosItemProperties(this.toJson()); } if (JsonSerializable.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Number.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c)) { return c.cast(this.get(Constants.Properties.VALUE)); } if (List.class.isAssignableFrom(c)) { Object o = this.get(Constants.Properties.VALUE); try { return this.getMapper().readValue(o.toString(), c); } catch (IOException e) { throw new IllegalStateException("Failed to convert to collection.", e); } } if (ObjectNode.class.isAssignableFrom(c)) { if (ObjectNode.class != c) { throw new IllegalArgumentException( "We support JSONObject but not its sub-classes."); } return c.cast(this.propertyBag); } else { JsonSerializable.checkForValidPOJO(c); try { return this.getMapper().readValue(this.toJson(), c); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } /** * Converts to a JSON string. * * @return the JSON string. */ public String toJson() { return this.toJson(SerializationFormattingPolicy.NONE); } /** * Converts to a JSON string. * * @param formattingPolicy the formatting policy to be used. * @return the JSON string. */ public String toJson(SerializationFormattingPolicy formattingPolicy) { this.populatePropertyBag(); if (SerializationFormattingPolicy.INDENTED.equals(formattingPolicy)) { return toPrettyJson(propertyBag); } else { return toJson(propertyBag); } } /** * Gets Simple STRING representation of property bag. * <p> * For proper conversion to json and inclusion of the default values * use {@link * * @return string representation of property bag. */ public String toString() { return toJson(propertyBag); } }
good point Kushagra. Thanks for catching this. Bhaskar can this be rolled back to what it used be? ie. `throw ex`? here and in other places.
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException { try { return itemMono .map(this::convertResponse) .block(); } catch (Exception ex) { final Throwable throwable = Exceptions.unwrap(ex); if (throwable instanceof CosmosClientException) { throw (CosmosClientException) throwable; } else { throw Exceptions.propagate(ex); } } }
throw Exceptions.propagate(ex);
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException { try { return itemMono .map(this::convertResponse) .block(); } catch (Exception ex) { final Throwable throwable = Exceptions.unwrap(ex); if (throwable instanceof CosmosClientException) { throw (CosmosClientException) throwable; } else { throw ex; } } }
class CosmosContainer { private final ClientLogger logger = new ClientLogger(CosmosContainer.class); private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. 
* * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. 
* * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. 
* * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. 
* * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
class CosmosContainer { private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. 
* * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. * * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. 
* * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. * * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. 
* * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. * * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
Missed rolling this back. I do a quick look and will revert all changes around these
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException { try { return itemMono .map(this::convertResponse) .block(); } catch (Exception ex) { final Throwable throwable = Exceptions.unwrap(ex); if (throwable instanceof CosmosClientException) { throw (CosmosClientException) throwable; } else { throw Exceptions.propagate(ex); } } }
throw Exceptions.propagate(ex);
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException { try { return itemMono .map(this::convertResponse) .block(); } catch (Exception ex) { final Throwable throwable = Exceptions.unwrap(ex); if (throwable instanceof CosmosClientException) { throw (CosmosClientException) throwable; } else { throw ex; } } }
class CosmosContainer { private final ClientLogger logger = new ClientLogger(CosmosContainer.class); private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. 
* * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. 
* * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. 
* * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. 
* * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
class CosmosContainer { private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. 
* * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. * * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. 
* * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. * * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. 
* * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. * * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
Done
static Object getValue(JsonNode value) { if (value.isValueNode()) { switch (value.getNodeType()) { case BOOLEAN: return value.asBoolean(); case NUMBER: if (value.isInt()) { return value.asInt(); } else if (value.isLong()) { return value.asLong(); } else if (value.isDouble()) { return value.asDouble(); } break; case STRING: return value.asText(); default: throw new IllegalStateException("Unexpected value: " + value.getNodeType()); } } return value; }
break;
static Object getValue(JsonNode value) { if (value.isValueNode()) { switch (value.getNodeType()) { case BOOLEAN: return value.asBoolean(); case NUMBER: if (value.isInt()) { return value.asInt(); } else if (value.isLong()) { return value.asLong(); } else if (value.isDouble()) { return value.asDouble(); } else{ return value; } case STRING: return value.asText(); default: throw new IllegalStateException("Unexpected value: " + value.getNodeType()); } } return value; }
class JsonSerializable { private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); private final static Logger logger = LoggerFactory.getLogger(JsonSerializable.class); transient ObjectNode propertyBag = null; private ObjectMapper om; protected JsonSerializable() { this.propertyBag = OBJECT_MAPPER.createObjectNode(); } /** * Constructor. * * @param jsonString the json string that represents the JsonSerializable. * @param objectMapper the custom object mapper */ JsonSerializable(String jsonString, ObjectMapper objectMapper) { this.propertyBag = fromJson(jsonString); this.om = objectMapper; } /** * Constructor. * * @param jsonString the json string that represents the JsonSerializable. */ protected JsonSerializable(String jsonString) { this.propertyBag = fromJson(jsonString); } /** * Constructor. * * @param objectNode the {@link ObjectNode} that represent the {@link JsonSerializable} */ JsonSerializable(ObjectNode objectNode) { this.propertyBag = objectNode; } private static void checkForValidPOJO(Class<?> c) { if (c.isAnonymousClass() || c.isLocalClass()) { throw new IllegalArgumentException( String.format("%s can't be an anonymous or local class.", c.getName())); } if (c.isMemberClass() && !Modifier.isStatic(c.getModifiers())) { throw new IllegalArgumentException( String.format("%s must be static if it's a member class.", c.getName())); } } private ObjectMapper getMapper() { if (this.om != null) { return this.om; } return OBJECT_MAPPER; } void setMapper(ObjectMapper om) { this.om = om; } @JsonIgnore protected Logger getLogger() { return logger; } void populatePropertyBag() { } /** * Returns the propertybag(JSONObject) in a hashMap * * @return the HashMap. */ public Map<String, Object> getMap() { return getMapper().convertValue(this.propertyBag, HashMap.class); } /** * Checks whether a property exists. * * @param propertyName the property to look up. * @return true if the property exists. 
*/ public boolean has(String propertyName) { return this.propertyBag.has(propertyName); } /** * Removes a value by propertyName. * * @param propertyName the property to remove. */ void remove(String propertyName) { this.propertyBag.remove(propertyName); } /** * Sets the value of a property. * * @param <T> the type of the object. * @param propertyName the property to set. * @param value the value of the property. */ @SuppressWarnings({"unchecked", "rawtypes"}) <T> void set(String propertyName, T value) { if (value == null) { this.propertyBag.putNull(propertyName); } else if (value instanceof Collection) { ArrayNode jsonArray = propertyBag.arrayNode(); this.internalSetCollection(propertyName, (Collection) value, jsonArray); this.propertyBag.set(propertyName, jsonArray); } else if (value instanceof JsonNode) { this.propertyBag.set(propertyName, (JsonNode) value); } else if (value instanceof JsonSerializable) { JsonSerializable castedValue = (JsonSerializable) value; if (castedValue != null) { castedValue.populatePropertyBag(); } this.propertyBag.set(propertyName, castedValue != null ? castedValue.propertyBag : null); } else { this.propertyBag.set(propertyName, getMapper().valueToTree(value)); } } @SuppressWarnings({"unchecked", "rawtypes"}) private <T> void internalSetCollection(String propertyName, Collection<T> collection, ArrayNode targetArray) { for (T childValue : collection) { if (childValue == null) { targetArray.addNull(); } else if (childValue instanceof Collection) { ArrayNode childArray = targetArray.addArray(); this.internalSetCollection(propertyName, (Collection) childValue, childArray); } else if (childValue instanceof JsonNode) { targetArray.add((JsonNode) childValue); } else if (childValue instanceof JsonSerializable) { JsonSerializable castedValue = (JsonSerializable) childValue; castedValue.populatePropertyBag(); targetArray.add(castedValue.propertyBag != null ? 
castedValue.propertyBag : this.getMapper().createObjectNode()); } else { targetArray.add(this.getMapper().valueToTree(childValue)); } } } /** * Gets a property value as Object. * * @param propertyName the property to get. * @return the value of the property. */ public Object get(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return getValue(this.propertyBag.get(propertyName)); } else { return null; } } /** * Gets a string value. * * @param propertyName the property to get. * @return the string value. */ public String getString(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return this.propertyBag.get(propertyName).asText(); } else { return null; } } /** * Gets a boolean value. * * @param propertyName the property to get. * @return the boolean value. */ public Boolean getBoolean(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return this.propertyBag.get(propertyName).asBoolean(); } else { return null; } } /** * Gets an integer value. * * @param propertyName the property to get. * @return the boolean value */ public Integer getInt(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return Integer.valueOf(this.propertyBag.get(propertyName).asInt()); } else { return null; } } /** * Gets a long value. * * @param propertyName the property to get. * @return the long value */ public Long getLong(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return Long.valueOf(this.propertyBag.get(propertyName).asLong()); } else { return null; } } /** * Gets a double value. * * @param propertyName the property to get. * @return the double value. 
*/ public Double getDouble(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return new Double(this.propertyBag.get(propertyName).asDouble()); } else { return null; } } /** * Gets an object value. * * @param <T> the type of the object. * @param propertyName the property to get. * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object value. * @throws IllegalStateException thrown if an error occurs */ public <T> T getObject(String propertyName, Class<T> c, boolean... convertFromCamelCase) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { JsonNode jsonObj = propertyBag.get(propertyName); if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c) || Object.class == c) { return c.cast(getValue(jsonObj)); } else if (Enum.class.isAssignableFrom(c)) { try { String value = String.class.cast(getValue(jsonObj)); value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? 
Strings.fromCamelCaseToUpperCase(value) : value; return c.cast(c.getMethod("valueOf", String.class).invoke(null, value)); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException("Failed to create enum.", e); } } else if (JsonSerializable.class.isAssignableFrom(c)) { try { Constructor<T> constructor = c.getDeclaredConstructor(String.class); if (Modifier.isPrivate(constructor.getModifiers())) { constructor.setAccessible(true); } return constructor.newInstance(toJson(jsonObj)); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException( "Failed to instantiate class object.", e); } } else { JsonSerializable.checkForValidPOJO(c); try { return this.getMapper().treeToValue(jsonObj, c); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } return null; } /** * Gets an object List. * * @param <T> the type of the objects in the List. * @param propertyName the property to get * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object collection. * @throws IllegalStateException thrown if an error occurs */ public <T> List<T> getList(String propertyName, Class<T> c, boolean... 
convertFromCamelCase) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { ArrayNode jsonArray = (ArrayNode) this.propertyBag.get(propertyName); ArrayList<T> result = new ArrayList<T>(); boolean isBaseClass = false; boolean isEnumClass = false; boolean isJsonSerializable = false; if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c) || Object.class == c) { isBaseClass = true; } else if (Enum.class.isAssignableFrom(c)) { isEnumClass = true; } else if (JsonSerializable.class.isAssignableFrom(c)) { isJsonSerializable = true; } else { JsonSerializable.checkForValidPOJO(c); } for (JsonNode n : jsonArray) { if (isBaseClass) { result.add(c.cast(getValue(n))); } else if (isEnumClass) { try { String value = String.class.cast(getValue(n)); value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? Strings.fromCamelCaseToUpperCase(value) : value; result.add(c.cast(c.getMethod("valueOf", String.class).invoke(null, value))); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException("Failed to create enum.", e); } } else if (isJsonSerializable) { try { Constructor<T> constructor = c.getDeclaredConstructor(String.class); if (Modifier.isPrivate(constructor.getModifiers())) { constructor.setAccessible(true); } result.add(constructor.newInstance(toJson(n))); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException( "Failed to instantiate class object.", e); } } else { try { result.add(this.getMapper().treeToValue(n, c)); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } return result; } return null; } /** * Gets an object collection. * * @param <T> the type of the objects in the collection. 
* @param propertyName the property to get * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object collection. */ public <T> Collection<T> getCollection(String propertyName, Class<T> c, boolean... convertFromCamelCase) { return getList(propertyName, c, convertFromCamelCase); } /** * Gets a JSONObject. * * @param propertyName the property to get. * @return the JSONObject. */ ObjectNode getObject(String propertyName) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { ObjectNode jsonObj = (ObjectNode) this.propertyBag.get(propertyName); return jsonObj; } return null; } /** * Gets a JSONObject collection. * * @param propertyName the property to get. * @return the JSONObject collection. */ Collection<ObjectNode> getCollection(String propertyName) { Collection<ObjectNode> result = null; if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { result = new ArrayList<ObjectNode>(); for (JsonNode n : this.propertyBag.findValues(propertyName)) { result.add((ObjectNode) n); } } return result; } /** * Gets the value of a property identified by an array of property names that forms the path. * * @param propertyNames that form the path to the property to get. * @return the value of the property. 
*/ public Object getObjectByPath(List<String> propertyNames) { ObjectNode propBag = this.propertyBag; JsonNode value = null; String propertyName = null; Integer matchedProperties = 0; Iterator<String> iterator = propertyNames.iterator(); if (iterator.hasNext()) { do { propertyName = iterator.next(); if (propBag.has(propertyName)) { matchedProperties++; value = propBag.get(propertyName); if (!value.isObject()) { break; } propBag = (ObjectNode) value; } else { break; } } while (iterator.hasNext()); if (value != null && matchedProperties == propertyNames.size()) { return getValue(value); } } return null; } private ObjectNode fromJson(String json) { try { return (ObjectNode) getMapper().readTree(json); } catch (IOException e) { throw new IllegalArgumentException( String.format("Unable to parse JSON %s", json), e); } } private String toJson(Object object) { try { return getMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException("Unable to convert JSON to STRING", e); } } private String toPrettyJson(Object object) { try { return getMapper().writerWithDefaultPrettyPrinter().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException("Unable to convert JSON to STRING", e); } } /** * Converts to an Object (only POJOs and JSONObject are supported). * * @param <T> the type of the object. * @param c the class of the object, either a POJO class or JSONObject. If c is a POJO class, it must be a member * (and not an anonymous or local) and a static one. * @return the POJO. 
* @throws IllegalArgumentException thrown if an error occurs */ public <T> T toObject(Class<T> c) { if (CosmosItemProperties.class.isAssignableFrom(c)) { return (T) new CosmosItemProperties(this.toJson()); } if (JsonSerializable.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Number.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c)) { return c.cast(this.get(Constants.Properties.VALUE)); } if (List.class.isAssignableFrom(c)) { Object o = this.get(Constants.Properties.VALUE); try { return this.getMapper().readValue(o.toString(), c); } catch (IOException e) { throw new IllegalStateException("Failed to convert to collection.", e); } } if (ObjectNode.class.isAssignableFrom(c)) { if (ObjectNode.class != c) { throw new IllegalArgumentException( "We support JSONObject but not its sub-classes."); } return c.cast(this.propertyBag); } else { JsonSerializable.checkForValidPOJO(c); try { return this.getMapper().readValue(this.toJson(), c); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } /** * Converts to a JSON string. * * @return the JSON string. */ public String toJson() { return this.toJson(SerializationFormattingPolicy.NONE); } /** * Converts to a JSON string. * * @param formattingPolicy the formatting policy to be used. * @return the JSON string. */ public String toJson(SerializationFormattingPolicy formattingPolicy) { this.populatePropertyBag(); if (SerializationFormattingPolicy.INDENTED.equals(formattingPolicy)) { return toPrettyJson(propertyBag); } else { return toJson(propertyBag); } } /** * Gets Simple STRING representation of property bag. * <p> * For proper conversion to json and inclusion of the default values * use {@link * * @return string representation of property bag. */ public String toString() { return toJson(propertyBag); } }
class JsonSerializable { private static final ObjectMapper OBJECT_MAPPER = Utils.getSimpleObjectMapper(); private final static Logger logger = LoggerFactory.getLogger(JsonSerializable.class); transient ObjectNode propertyBag = null; private ObjectMapper om; protected JsonSerializable() { this.propertyBag = OBJECT_MAPPER.createObjectNode(); } /** * Constructor. * * @param jsonString the json string that represents the JsonSerializable. * @param objectMapper the custom object mapper */ JsonSerializable(String jsonString, ObjectMapper objectMapper) { this.propertyBag = fromJson(jsonString); this.om = objectMapper; } /** * Constructor. * * @param jsonString the json string that represents the JsonSerializable. */ protected JsonSerializable(String jsonString) { this.propertyBag = fromJson(jsonString); } /** * Constructor. * * @param objectNode the {@link ObjectNode} that represent the {@link JsonSerializable} */ JsonSerializable(ObjectNode objectNode) { this.propertyBag = objectNode; } private static void checkForValidPOJO(Class<?> c) { if (c.isAnonymousClass() || c.isLocalClass()) { throw new IllegalArgumentException( String.format("%s can't be an anonymous or local class.", c.getName())); } if (c.isMemberClass() && !Modifier.isStatic(c.getModifiers())) { throw new IllegalArgumentException( String.format("%s must be static if it's a member class.", c.getName())); } } private ObjectMapper getMapper() { if (this.om != null) { return this.om; } return OBJECT_MAPPER; } void setMapper(ObjectMapper om) { this.om = om; } @JsonIgnore protected Logger getLogger() { return logger; } void populatePropertyBag() { } /** * Returns the propertybag(JSONObject) in a hashMap * * @return the HashMap. */ public Map<String, Object> getMap() { return getMapper().convertValue(this.propertyBag, HashMap.class); } /** * Checks whether a property exists. * * @param propertyName the property to look up. * @return true if the property exists. 
*/ public boolean has(String propertyName) { return this.propertyBag.has(propertyName); } /** * Removes a value by propertyName. * * @param propertyName the property to remove. */ void remove(String propertyName) { this.propertyBag.remove(propertyName); } /** * Sets the value of a property. * * @param <T> the type of the object. * @param propertyName the property to set. * @param value the value of the property. */ @SuppressWarnings({"unchecked", "rawtypes"}) <T> void set(String propertyName, T value) { if (value == null) { this.propertyBag.putNull(propertyName); } else if (value instanceof Collection) { ArrayNode jsonArray = propertyBag.arrayNode(); this.internalSetCollection(propertyName, (Collection) value, jsonArray); this.propertyBag.set(propertyName, jsonArray); } else if (value instanceof JsonNode) { this.propertyBag.set(propertyName, (JsonNode) value); } else if (value instanceof JsonSerializable) { JsonSerializable castedValue = (JsonSerializable) value; if (castedValue != null) { castedValue.populatePropertyBag(); } this.propertyBag.set(propertyName, castedValue != null ? castedValue.propertyBag : null); } else { this.propertyBag.set(propertyName, getMapper().valueToTree(value)); } } @SuppressWarnings({"unchecked", "rawtypes"}) private <T> void internalSetCollection(String propertyName, Collection<T> collection, ArrayNode targetArray) { for (T childValue : collection) { if (childValue == null) { targetArray.addNull(); } else if (childValue instanceof Collection) { ArrayNode childArray = targetArray.addArray(); this.internalSetCollection(propertyName, (Collection) childValue, childArray); } else if (childValue instanceof JsonNode) { targetArray.add((JsonNode) childValue); } else if (childValue instanceof JsonSerializable) { JsonSerializable castedValue = (JsonSerializable) childValue; castedValue.populatePropertyBag(); targetArray.add(castedValue.propertyBag != null ? 
castedValue.propertyBag : this.getMapper().createObjectNode()); } else { targetArray.add(this.getMapper().valueToTree(childValue)); } } } /** * Gets a property value as Object. * * @param propertyName the property to get. * @return the value of the property. */ public Object get(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return getValue(this.propertyBag.get(propertyName)); } else { return null; } } /** * Gets a string value. * * @param propertyName the property to get. * @return the string value. */ public String getString(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return this.propertyBag.get(propertyName).asText(); } else { return null; } } /** * Gets a boolean value. * * @param propertyName the property to get. * @return the boolean value. */ public Boolean getBoolean(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return this.propertyBag.get(propertyName).asBoolean(); } else { return null; } } /** * Gets an integer value. * * @param propertyName the property to get. * @return the boolean value */ public Integer getInt(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return Integer.valueOf(this.propertyBag.get(propertyName).asInt()); } else { return null; } } /** * Gets a long value. * * @param propertyName the property to get. * @return the long value */ public Long getLong(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return Long.valueOf(this.propertyBag.get(propertyName).asLong()); } else { return null; } } /** * Gets a double value. * * @param propertyName the property to get. * @return the double value. 
*/ public Double getDouble(String propertyName) { if (this.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { return new Double(this.propertyBag.get(propertyName).asDouble()); } else { return null; } } /** * Gets an object value. * * @param <T> the type of the object. * @param propertyName the property to get. * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object value. * @throws IllegalStateException thrown if an error occurs */ public <T> T getObject(String propertyName, Class<T> c, boolean... convertFromCamelCase) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { JsonNode jsonObj = propertyBag.get(propertyName); if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c) || Object.class == c) { return c.cast(getValue(jsonObj)); } else if (Enum.class.isAssignableFrom(c)) { try { String value = String.class.cast(getValue(jsonObj)); value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? 
Strings.fromCamelCaseToUpperCase(value) : value; return c.cast(c.getMethod("valueOf", String.class).invoke(null, value)); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException("Failed to create enum.", e); } } else if (JsonSerializable.class.isAssignableFrom(c)) { try { Constructor<T> constructor = c.getDeclaredConstructor(String.class); if (Modifier.isPrivate(constructor.getModifiers())) { constructor.setAccessible(true); } return constructor.newInstance(toJson(jsonObj)); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException( "Failed to instantiate class object.", e); } } else { JsonSerializable.checkForValidPOJO(c); try { return this.getMapper().treeToValue(jsonObj, c); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } return null; } /** * Gets an object List. * * @param <T> the type of the objects in the List. * @param propertyName the property to get * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object collection. * @throws IllegalStateException thrown if an error occurs */ public <T> List<T> getList(String propertyName, Class<T> c, boolean... 
convertFromCamelCase) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { ArrayNode jsonArray = (ArrayNode) this.propertyBag.get(propertyName); ArrayList<T> result = new ArrayList<T>(); boolean isBaseClass = false; boolean isEnumClass = false; boolean isJsonSerializable = false; if (Number.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c) || Object.class == c) { isBaseClass = true; } else if (Enum.class.isAssignableFrom(c)) { isEnumClass = true; } else if (JsonSerializable.class.isAssignableFrom(c)) { isJsonSerializable = true; } else { JsonSerializable.checkForValidPOJO(c); } for (JsonNode n : jsonArray) { if (isBaseClass) { result.add(c.cast(getValue(n))); } else if (isEnumClass) { try { String value = String.class.cast(getValue(n)); value = convertFromCamelCase.length > 0 && convertFromCamelCase[0] ? Strings.fromCamelCaseToUpperCase(value) : value; result.add(c.cast(c.getMethod("valueOf", String.class).invoke(null, value))); } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException("Failed to create enum.", e); } } else if (isJsonSerializable) { try { Constructor<T> constructor = c.getDeclaredConstructor(String.class); if (Modifier.isPrivate(constructor.getModifiers())) { constructor.setAccessible(true); } result.add(constructor.newInstance(toJson(n))); } catch (InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException | NoSuchMethodException | SecurityException e) { throw new IllegalStateException( "Failed to instantiate class object.", e); } } else { try { result.add(this.getMapper().treeToValue(n, c)); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } return result; } return null; } /** * Gets an object collection. * * @param <T> the type of the objects in the collection. 
* @param propertyName the property to get * @param c the class of the object. If c is a POJO class, it must be a member (and not an anonymous or local) * and a static one. * @param convertFromCamelCase boolean indicating if String should be converted from camel case to upper case * separated by underscore, * before converting to required class. * @return the object collection. */ public <T> Collection<T> getCollection(String propertyName, Class<T> c, boolean... convertFromCamelCase) { return getList(propertyName, c, convertFromCamelCase); } /** * Gets a JSONObject. * * @param propertyName the property to get. * @return the JSONObject. */ ObjectNode getObject(String propertyName) { if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { ObjectNode jsonObj = (ObjectNode) this.propertyBag.get(propertyName); return jsonObj; } return null; } /** * Gets a JSONObject collection. * * @param propertyName the property to get. * @return the JSONObject collection. */ Collection<ObjectNode> getCollection(String propertyName) { Collection<ObjectNode> result = null; if (this.propertyBag.has(propertyName) && this.propertyBag.hasNonNull(propertyName)) { result = new ArrayList<ObjectNode>(); for (JsonNode n : this.propertyBag.findValues(propertyName)) { result.add((ObjectNode) n); } } return result; } /** * Gets the value of a property identified by an array of property names that forms the path. * * @param propertyNames that form the path to the property to get. * @return the value of the property. 
*/ public Object getObjectByPath(List<String> propertyNames) { ObjectNode propBag = this.propertyBag; JsonNode value = null; String propertyName = null; Integer matchedProperties = 0; Iterator<String> iterator = propertyNames.iterator(); if (iterator.hasNext()) { do { propertyName = iterator.next(); if (propBag.has(propertyName)) { matchedProperties++; value = propBag.get(propertyName); if (!value.isObject()) { break; } propBag = (ObjectNode) value; } else { break; } } while (iterator.hasNext()); if (value != null && matchedProperties == propertyNames.size()) { return getValue(value); } } return null; } private ObjectNode fromJson(String json) { try { return (ObjectNode) getMapper().readTree(json); } catch (IOException e) { throw new IllegalArgumentException( String.format("Unable to parse JSON %s", json), e); } } private String toJson(Object object) { try { return getMapper().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException("Unable to convert JSON to STRING", e); } } private String toPrettyJson(Object object) { try { return getMapper().writerWithDefaultPrettyPrinter().writeValueAsString(object); } catch (JsonProcessingException e) { throw new IllegalStateException("Unable to convert JSON to STRING", e); } } /** * Converts to an Object (only POJOs and JSONObject are supported). * * @param <T> the type of the object. * @param c the class of the object, either a POJO class or JSONObject. If c is a POJO class, it must be a member * (and not an anonymous or local) and a static one. * @return the POJO. 
* @throws IllegalArgumentException thrown if an error occurs */ public <T> T toObject(Class<T> c) { if (CosmosItemProperties.class.isAssignableFrom(c)) { return (T) new CosmosItemProperties(this.toJson()); } if (JsonSerializable.class.isAssignableFrom(c) || String.class.isAssignableFrom(c) || Number.class.isAssignableFrom(c) || Boolean.class.isAssignableFrom(c)) { return c.cast(this.get(Constants.Properties.VALUE)); } if (List.class.isAssignableFrom(c)) { Object o = this.get(Constants.Properties.VALUE); try { return this.getMapper().readValue(o.toString(), c); } catch (IOException e) { throw new IllegalStateException("Failed to convert to collection.", e); } } if (ObjectNode.class.isAssignableFrom(c)) { if (ObjectNode.class != c) { throw new IllegalArgumentException( "We support JSONObject but not its sub-classes."); } return c.cast(this.propertyBag); } else { JsonSerializable.checkForValidPOJO(c); try { return this.getMapper().readValue(this.toJson(), c); } catch (IOException e) { throw new IllegalStateException("Failed to get POJO.", e); } } } /** * Converts to a JSON string. * * @return the JSON string. */ public String toJson() { return this.toJson(SerializationFormattingPolicy.NONE); } /** * Converts to a JSON string. * * @param formattingPolicy the formatting policy to be used. * @return the JSON string. */ public String toJson(SerializationFormattingPolicy formattingPolicy) { this.populatePropertyBag(); if (SerializationFormattingPolicy.INDENTED.equals(formattingPolicy)) { return toPrettyJson(propertyBag); } else { return toJson(propertyBag); } } /** * Gets Simple STRING representation of property bag. * <p> * For proper conversion to json and inclusion of the default values * use {@link * * @return string representation of property bag. */ public String toString() { return toJson(propertyBag); } }
Ahh. That is correct. Thanks
/**
 * Sets the expressions to filter {@link ConfigurationSetting} keys on when querying the
 * service.
 *
 * @param keyFilter The expressions to filter ConfigurationSetting keys on.
 * @return The updated SettingSelector object.
 * @throws NullPointerException if {@code keyFilter} is null.
 */
public SettingSelector setKeyFilter(String keyFilter) {
    // Fail fast with an explicit argument name instead of a bare NPE so callers can
    // immediately tell which parameter was invalid.
    Objects.requireNonNull(keyFilter, "'keyFilter' cannot be null.");
    this.keyFilter = keyFilter;
    return this;
}
Objects.requireNonNull(keyFilter);
/**
 * Sets the expressions to filter {@link ConfigurationSetting} keys on.
 *
 * @param keyFilter The expressions to filter ConfigurationSetting keys on. May be
 * {@code null}; the value is stored as-is without validation.
 * @return The updated SettingSelector object.
 */
public SettingSelector setKeyFilter(String keyFilter) {
    this.keyFilter = keyFilter;
    return this;
}
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
class SettingSelector { private String keyFilter; private String labelFilter; private SettingFields[] fields; private String acceptDatetime; /** * Creates a setting selector that will populate responses with all of the {@link ConfigurationSetting * ConfigurationSetting's} properties and select all {@link ConfigurationSetting */ public SettingSelector() { } /** * Gets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @return The expressions to filter ConfigurationSetting keys on. */ public String getKeyFilter() { return keyFilter; } /** * Sets the expressions to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code keyFilter = "*"}, settings with any key are returned.</li> * <li>If {@code keyFilter = "abc1234"}, settings with a key equal to "abc1234" are returned.</li> * <li>If {@code keyFilter = "abc*"}, settings with a key starting with "abc" are returned.</li> * <li>If {@code keyFilter = "*abc*"}, settings with a key containing "abc" are returned.</li> * <li>If {@code keyFilter = "abc,def"}, settings with a key equal to "abc" or "def" are returned.</li> * </ul> * * @param keyFilter The expressions to filter ConfigurationSetting keys on. * @return The updated SettingSelector object */ /** * Gets the labels used to filter settings based on their {@link ConfigurationSetting * service. 
* * If the value is {@code null} or an empty string, all ConfigurationSettings with {@link * ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned.</li> * <li>If {@code labelFilter = ""}, settings without any label are returned.</li> * <li>If {@code labelFilter = null}, settings without any label are returned.</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @return labels The labels used to filter GET requests from the service. */ public String getLabelFilter() { return labelFilter; } /** * Sets the expression to filter {@link ConfigurationSetting * * <p> * Examples: * <ul> * <li>If {@code labelFilter = "*"}, settings with any label are returned.</li> * <li>If {@code labelFilter = "\0"}, settings without any label are returned. (This is the default label.)</li> * <li>If {@code labelFilter = "abc1234"}, settings with a label equal to "abc1234" are returned.</li> * <li>If {@code labelFilter = "abc*"}, settings with a label starting with "abc" are returned.</li> * <li>If {@code labelFilter = "*abc*"}, settings with a label containing "abc" are returned.</li> * <li>If {@code labelFilter = "abc,def"}, settings with labels "abc" or "def" are returned.</li> * </ul> * * @param labelFilter The expressions to filter ConfigurationSetting labels on. If the provided value is * {@code null} or {@code ""}, all ConfigurationSettings will be returned regardless of their label. * @return SettingSelector The updated SettingSelector object. 
*/ public SettingSelector setLabelFilter(String labelFilter) { this.labelFilter = labelFilter; return this; } /** * Gets the date time for the request query. When the query is performed, if {@code acceptDateTime} is set, the * {@link ConfigurationSetting * the current value is returned. * * @return Gets the currently set datetime in {@link DateTimeFormatter */ public String getAcceptDateTime() { return this.acceptDatetime; } /** * If set, then configuration setting values will be retrieved as they existed at the provided datetime. Otherwise, * the current values are returned. * * @param datetime The value of the configuration setting at that given {@link OffsetDateTime}. * @return The updated SettingSelector object. */ public SettingSelector setAcceptDatetime(OffsetDateTime datetime) { this.acceptDatetime = DateTimeFormatter.RFC_1123_DATE_TIME.toFormat().format(datetime); return this; } /** * Gets the fields on {@link ConfigurationSetting} to return from the GET request. If none are set, the service * returns the ConfigurationSettings with all of their fields populated. * * @return The set of {@link ConfigurationSetting} fields to return for a GET request. */ public SettingFields[] getFields() { return fields == null ? new SettingFields[0] : CoreUtils.clone(fields); } /** * Sets fields that will be returned in the response corresponding to properties in {@link ConfigurationSetting}. If * none are set, the service returns ConfigurationSettings with all of their fields populated. * * @param fields The fields to select for the query response. If none are set, the service will return the * ConfigurationSettings with a default set of properties. * @return The updated SettingSelector object. */ public SettingSelector setFields(SettingFields... 
fields) { this.fields = fields; return this; } @Override public String toString() { String fields; if (CoreUtils.isNullOrEmpty(this.fields)) { fields = "ALL_FIELDS"; } else { fields = CoreUtils.arrayToString(this.fields, SettingFields::toStringMapper); } return String.format("SettingSelector(keyFilter=%s, labelFilter=%s, acceptDateTime=%s, fields=%s)", this.keyFilter, this.labelFilter, this.acceptDatetime, fields); } }
Done
/**
 * Blocks on the given async item response and converts it to a synchronous
 * {@link CosmosItemResponse}.
 *
 * @param itemMono the async item response to block on.
 * @return the synchronous item response.
 * @throws CosmosClientException if the blocked operation failed with a Cosmos service error.
 */
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException {
    try {
        return itemMono
                   .map(this::convertResponse)
                   .block();
    } catch (Exception ex) {
        // Reactor wraps errors surfaced from block(); unwrap to recover the original failure.
        final Throwable throwable = Exceptions.unwrap(ex);
        if (throwable instanceof CosmosClientException) {
            // Surface the service error as the checked exception callers expect.
            throw (CosmosClientException) throwable;
        } else {
            // Rethrow the originally caught exception as-is instead of passing it through
            // Exceptions.propagate(...), which may wrap it again and obscure the original
            // type and stack trace. Precise rethrow is legal here because the try body can
            // only throw unchecked exceptions.
            throw ex;
        }
    }
}
throw Exceptions.propagate(ex);
/**
 * Blocks on the given async item response and converts it to a synchronous
 * {@link CosmosItemResponse}.
 *
 * @param itemMono the async item response to block on.
 * @return the synchronous item response.
 * @throws CosmosClientException if the blocked operation failed with a Cosmos service error.
 */
CosmosItemResponse mapItemResponseAndBlock(Mono<CosmosAsyncItemResponse> itemMono) throws CosmosClientException {
    try {
        return itemMono
            .map(this::convertResponse)
            .block();
    } catch (Exception ex) {
        // Reactor wraps errors surfaced from block(); unwrap to recover the original failure.
        final Throwable throwable = Exceptions.unwrap(ex);
        if (throwable instanceof CosmosClientException) {
            // Surface the service error as the checked exception callers expect.
            throw (CosmosClientException) throwable;
        } else {
            // Not a Cosmos service error; rethrow the originally caught exception unchanged.
            throw ex;
        }
    }
}
class CosmosContainer { private final ClientLogger logger = new ClientLogger(CosmosContainer.class); private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. 
* * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. 
* * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. 
* * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. 
* * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
class CosmosContainer { private final CosmosAsyncContainer containerWrapper; private final CosmosDatabase database; private final String id; private CosmosScripts scripts; /** * Instantiates a new Cosmos sync container. * * @param id the id * @param database the database * @param container the container */ CosmosContainer(String id, CosmosDatabase database, CosmosAsyncContainer container) { this.id = id; this.database = database; this.containerWrapper = container; } /** * Id string. * * @return the string */ public String getId() { return id; } /** * Read cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read()); } /** * Read cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse read(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.read(options)); } /** * Delete cosmos sync container response. * * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete(CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete(options)); } /** * Delete cosmos sync container response. * * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse delete() throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.delete()); } /** * Replace cosmos sync container response. 
* * @param containerProperties the container properties * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties)); } /** * Replace cosmos sync container response. * * @param containerProperties the container properties * @param options the options * @return the cosmos sync container response * @throws CosmosClientException the cosmos client exception */ public CosmosContainerResponse replace(CosmosContainerProperties containerProperties, CosmosContainerRequestOptions options) throws CosmosClientException { return database.mapContainerResponseAndBlock(this.containerWrapper.replace(containerProperties, options)); } /** * Read provisioned throughput integer. * * @return the integer. null response indicates database doesn't have any provisioned RUs * @throws CosmosClientException the cosmos client exception */ public Integer readProvisionedThroughput() throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper.readProvisionedThroughput()); } /** * Replace provisioned throughput integer. * * @param requestUnitsPerSecond the request units per second * @return the integer * @throws CosmosClientException the cosmos client exception */ public Integer replaceProvisionedThroughput(int requestUnitsPerSecond) throws CosmosClientException { return database.throughputResponseToBlock(this.containerWrapper .replaceProvisionedThroughput(requestUnitsPerSecond)); } /* CosmosAsyncItem operations */ /** * Create item cosmos sync item response. 
* * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item)); } /** * Create item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse createItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Upsert item cosmos sync item response. * * @param item the item * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.upsertItem(item)); } /** * Upsert item cosmos sync item response. * * @param item the item * @param options the options * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ public CosmosItemResponse upsertItem(Object item, CosmosItemRequestOptions options) throws CosmosClientException { return this.mapItemResponseAndBlock(this.containerWrapper.createItem(item, options)); } /** * Map item response and block cosmos sync item response. * * @param itemMono the item mono * @return the cosmos sync item response * @throws CosmosClientException the cosmos client exception */ /** * Read all items iterator. * * @param <T> the type parameter * @param options the options * @param klass the klass * @return the iterator */ public <T> Iterator<FeedResponse<T>> readAllItems(FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.readAllItems(options, klass)); } /** * Query items iterator. 
* * @param <T> the type parameter * @param query the query * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(String query, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(query, options, klass)); } /** * Query items iterator. * * @param <T> the type parameter * @param querySpec the query spec * @param options the options * @param klass the class type * @return the iterator */ public <T> Iterator<FeedResponse<T>> queryItems(SqlQuerySpec querySpec, FeedOptions options, Class<T> klass) { return getFeedIterator(this.containerWrapper.queryItems(querySpec, options, klass)); } /** * Query change feed items iterator. * * @param changeFeedOptions the change feed options * @return the iterator */ public Iterator<FeedResponse<CosmosItemProperties>> queryChangeFeedItems(ChangeFeedOptions changeFeedOptions) { return getFeedIterator(this.containerWrapper.queryChangeFeedItems(changeFeedOptions)); } /** * Gets item. * * @param id the id * @param partitionKey the partition key * @return the item */ public CosmosItem getItem(String id, Object partitionKey) { return new CosmosItem(id, partitionKey, this, containerWrapper.getItem(id, partitionKey)); } /** * Gets the cosmos sync scripts. * * @return the cosmos sync scripts */ public CosmosScripts getScripts() { if (this.scripts == null) { this.scripts = new CosmosScripts(this, containerWrapper.getScripts()); } return this.scripts; } /** * Convert response cosmos sync item response. * * @param response the cosmos item response * @return the cosmos sync item response */ private CosmosItemResponse convertResponse(CosmosAsyncItemResponse response) { return new CosmosItemResponse(response, null, this); } private <T> Iterator<FeedResponse<T>> getFeedIterator(Flux<FeedResponse<T>> itemFlux) { return itemFlux.toIterable(1).iterator(); } }
Should we deprecate this?
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
Given that this class is generally only used by internal code but needs to be public to support it being returned I think having `failedHandles` being instantiated to 0 is a safe choice, it'll just need to be documented.
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
this.failedHandles = 0;
public CloseHandlesInfo(Integer closedHandles) { this.closedHandles = closedHandles; this.failedHandles = 0; }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }
class CloseHandlesInfo { private final Integer closedHandles; private final Integer failedHandles; /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * Note : Failed handles was added as a parameter, default value for failed handles is 0 */ /** * Creates an instance of information about close handles. * * @param closedHandles The numbers of handles closed. * @param failedHandles The numbers of handles that failed to close. */ public CloseHandlesInfo(Integer closedHandles, Integer failedHandles) { this.closedHandles = closedHandles; this.failedHandles = failedHandles; } /** * * @return The number of handles closed. */ public int getClosedHandles() { return this.closedHandles; } /** * * @return The number of handles that failed to close. */ public int getFailedHandles() { return this.failedHandles; } }